1 /*- 2 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 3. Neither the names of the above-listed copyright holders nor the names 16 * of any contributors may be used to endorse or promote products derived 17 * from this software without specific prior written permission. 18 * 19 * Alternatively, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") version 2 as published by the Free 21 * Software Foundation. 22 * 23 * NO WARRANTY 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 27 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 28 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 29 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 32 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 34 * THE POSSIBILITY OF SUCH DAMAGES. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <contrib/dev/ath/ah_desc.h>
#include <contrib/dev/ath/ah_devid.h>		/* XXX for softled */

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

/* unaligned little endian access */
#define LE_READ_2(p)							\
	((u_int16_t)							\
	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)							\
	((u_int32_t)							\
	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8) |	\
	  (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))

/* event codes passed to ath_led_event() */
enum {
	ATH_LED_TX,
	ATH_LED_RX,
	ATH_LED_POLL,
};

/* forward declarations for the driver's internal methods */
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static void	ath_start(struct ifnet *);
static int	ath_reset(struct ifnet *);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(struct ifnet *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_rxorn_proc(void *, int);
static void	ath_bmiss_proc(void *, int);
static int	ath_key_alloc(struct ieee80211com *,
			const struct ieee80211_key *,
			ieee80211_keyix *, ieee80211_keyix *);
static int	ath_key_delete(struct ieee80211com *,
			const struct ieee80211_key *);
static int	ath_key_set(struct ieee80211com *, const struct ieee80211_key *,
			const u_int8_t mac[IEEE80211_ADDR_LEN]);
static void	ath_key_update_begin(struct ieee80211com *);
static void	ath_key_update_end(struct ieee80211com *);
static void	ath_mode_init(struct ath_softc *);
static void	ath_setslottime(struct ath_softc *);
static void	ath_updateslot(struct ifnet *);
static int	ath_beaconq_setup(struct ath_hal *);
static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_proc(void *, int);
static void	ath_bstuck_proc(void *, int);
static void	ath_beacon_free(struct ath_softc *);
static void	ath_beacon_config(struct ath_softc *);
static void	ath_descdma_cleanup(struct ath_softc *sc,
			struct ath_descdma *, ath_bufhead *);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *);
static void	ath_node_free(struct ieee80211_node *);
static u_int8_t	ath_node_getrssi(const struct ieee80211_node *);
static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void	ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
			struct ieee80211_node *ni,
			int subtype, int rssi, u_int32_t rstamp);
static void	ath_setdefantenna(struct ath_softc *, u_int);
static void	ath_rx_proc(void *, int);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static int	ath_wme_update(struct ieee80211com *);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static int	ath_tx_start(struct ath_softc *, struct ieee80211_node *,
			struct ath_buf *, struct mbuf *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *);
static void	ath_stoprecv(struct ath_softc *);
static int	ath_startrecv(struct ath_softc *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_next_scan(void *);
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_getchannels(struct ath_softc *,
			HAL_REG_DOMAIN, HAL_CTRY_CODE, HAL_BOOL, HAL_BOOL);
static void	ath_led_event(struct ath_softc *, int);
static void	ath_update_txpow(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_sysctlattach(struct ath_softc *);
static int	ath_raw_xmit(struct ieee80211_node *,
			struct mbuf *, const struct ieee80211_bpf_params *);
static void	ath_bpfattach(struct ath_softc *);
static void	ath_announce(struct ath_softc *);

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_dwelltime = 200;		/* 5 channels/second */
SYSCTL_INT(_hw_ath, OID_AUTO, dwell, CTLFLAG_RW, &ath_dwelltime,
	    0, "channel dwell time (ms) for AP/station scanning");
static	int ath_calinterval = 30;		/* calibrate every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval,
	    0, "chip calibration interval (secs)");
static	int ath_outdoor = AH_TRUE;		/* outdoor operation */
SYSCTL_INT(_hw_ath, OID_AUTO, outdoor, CTLFLAG_RW, &ath_outdoor,
	    0, "outdoor operation");
TUNABLE_INT("hw.ath.outdoor", &ath_outdoor);
static	int ath_xchanmode = AH_TRUE;		/* extended channel use */
SYSCTL_INT(_hw_ath, OID_AUTO, xchanmode, CTLFLAG_RW, &ath_xchanmode,
	    0, "extended channel mode");
TUNABLE_INT("hw.ath.xchanmode", &ath_xchanmode);
static	int ath_countrycode = CTRY_DEFAULT;	/* country code */
SYSCTL_INT(_hw_ath, OID_AUTO, countrycode, CTLFLAG_RW, &ath_countrycode,
	    0, "country code");
TUNABLE_INT("hw.ath.countrycode", &ath_countrycode);
static	int ath_regdomain = 0;			/* regulatory domain */
SYSCTL_INT(_hw_ath, OID_AUTO, regdomain, CTLFLAG_RD, &ath_regdomain,
	    0, "regulatory domain");

static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

#ifdef ATH_DEBUG
static	int ath_debug = 0;
SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
	    0, "control debugging printfs");
TUNABLE_INT("hw.ath.debug", &ath_debug);
/* bit values for the hw.ath.debug mask; tested by DPRINTF below */
enum {
	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
	ATH_DEBUG_MODE		= 0x00000040,	/* mode init/setup */
	ATH_DEBUG_BEACON	= 0x00000080,	/* beacon handling */
	ATH_DEBUG_WATCHDOG	= 0x00000100,	/* watchdog timeout */
	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
	ATH_DEBUG_LED		= 0x00100000,	/* led management */
	ATH_DEBUG_FF		= 0x00200000,	/* fast frames */
	ATH_DEBUG_DFS		= 0x00400000,	/* DFS processing */
	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	ATH_DEBUG_ANY		= 0xffffffff
};
/* dump a packet when the mask matches or IFF_DEBUG+IFF_LINK2 are both set */
#define	IFF_DUMPPKTS(sc, m) \
	((sc->sc_debug & (m)) || \
	    (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, ix, hk, mac) do {				\
	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
		ath_keyprint(sc, __func__, ix, hk, mac);	\
} while (0)
static	void ath_printrxbuf(const struct ath_buf *bf, u_int ix, int);
static	void ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done);
#else
/* non-debug build: debug macros collapse to no-ops */
#define	IFF_DUMPPKTS(sc, m) \
	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#define	DPRINTF(sc, m, fmt, ...) do {		\
	(void) sc;				\
} while (0)
/*
 * NOTE(review): the non-debug KEYPRINTF names its parameters (sc, k, ix, mac)
 * while the debug version above uses (sc, ix, hk, mac).  Harmless for a
 * no-op macro, but the orders differ -- confirm intended.
 */
#define	KEYPRINTF(sc, k, ix, mac) do {		\
	(void) sc;				\
} while (0)
#endif

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

/*
 * Attach the device: probe the HAL, size/reset the key cache, build the
 * channel and rate tables, allocate descriptors and tx queues, wire up
 * the net80211 layer, and register sysctl/bpf hooks.  Returns 0 on
 * success or an errno; on failure all partially-acquired resources are
 * released via the bad2/bad labels.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	/* refuse to run against a HAL built for a different ABI */
	if (ah->ah_abi != HAL_ABI_VERSION) {
		if_printf(ifp, "HAL ABI mismatch detected "
			"(HAL:0x%x != driver:0x%x)\n",
			ah->ah_abi, HAL_ABI_VERSION);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is resposible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_getchannels(sc, ath_regdomain, ath_countrycode,
			ath_xchanmode != 0, ath_outdoor != 0);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init(&sc->sc_scan_ch, debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE);
	callout_init(&sc->sc_dfs_ch, CALLOUT_MPSAFE);

	ATH_TXBUF_LOCK_INIT(sc);

	/* private taskqueue so deferred work runs in its own thread */
	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles reseting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	ath_txq_init(sc, &sc->sc_mcastq, -1);	/* NB: s/w q, qnum not used */
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the anntena state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_watchdog = ath_watchdog;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	ic->ic_reset = ath_reset;
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_caps |= IEEE80211_C_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_caps |= IEEE80211_C_AES;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_caps |= IEEE80211_C_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_caps |= IEEE80211_C_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_caps |= IEEE80211_C_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_caps |= IEEE80211_C_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, ic->ic_myaddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	sc->sc_opmode = ic->ic_opmode;
	/* override default methods */
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getrssi = ath_node_getrssi;
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_recv_mgmt = ath_recv_mgmt;
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ath_newstate;
	ic->ic_crypto.cs_max_keyix = sc->sc_keymax;
	ic->ic_crypto.cs_key_alloc = ath_key_alloc;
	ic->ic_crypto.cs_key_delete = ath_key_delete;
	ic->ic_crypto.cs_key_set = ath_key_set;
	ic->ic_crypto.cs_key_update_begin = ath_key_update_begin;
	ic->ic_crypto.cs_key_update_end = ath_key_update_end;
	ic->ic_raw_xmit = ath_raw_xmit;
	/* complete initialization */
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);

	ath_bpfattach(sc);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

/*
 * Detach the device: stop the interface, then tear down in the order
 * documented below.  Returns 0.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	bpfdetach(ifp);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(&sc->sc_ic);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	taskqueue_free(sc->sc_tq);
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);
	if_free(ifp);

	return 0;
}

/*
 * Power-management suspend: just stop the interface.
 */
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
}

/*
 * Power-management resume: re-init if the interface was up and restart
 * transmit; re-program the soft-LED GPIO which is lost across suspend.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	if (ifp->if_flags & IFF_UP) {
		ath_init(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ath_start(ifp);
	}
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
		ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
	}
}

/*
 * System shutdown hook: quiesce the hardware.
 */
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return;
	if (!((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags &
	    IFF_DRV_RUNNING))) {
		/* interface down: ack and mask the interrupt source */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else if (status & HAL_INT_RXORN) {
		sc->sc_stats.ast_rxorn++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxorntask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
			ath_beacon_proc(sc, 0);
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/* rx/tx completion work is deferred to the taskqueue */
		if (status & HAL_INT_RX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		if (status & HAL_INT_TX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
	}
}

/*
 * Deferred handler for fatal (typically DMA) hardware errors: dump the
 * HAL's saved register state and reset the device.
 */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &state, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
			state[0], state[1] , state[2], state[3],
			state[4], state[5]);
	}
	ath_reset(ifp);
}

/*
 * Deferred handler for rx FIFO overrun: recover by resetting the chip.
 */
static void
ath_rxorn_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	if_printf(ifp, "rx FIFO overrun; resetting\n");
	ath_reset(ifp);
}

/*
 * Deferred handler for beacon-miss interrupts (station mode only).
 * Filters out phantom bmiss events before notifying net80211.
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
	KASSERT(ic->ic_opmode == IEEE80211_M_STA,
		("unexpect operating mode %u", ic->ic_opmode));
	if (ic->ic_state == IEEE80211_S_RUN) {
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* NB: beacon interval is in TU; 1 TU = 1024 usecs */
		u_int bmisstimeout =
			ic->ic_bmissthreshold * ic->ic_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);
		/*
		 * Workaround phantom bmiss interrupts by sanity-checking
		 * the time of our last rx'd frame.  If it is within the
		 * beacon miss interval then ignore the interrupt.  If it's
		 * truly a bmiss we'll get another interrupt soon and that'll
		 * be dispatched up for processing.
		 */
		if (tsf - lastrx > bmisstimeout) {
			NET_LOCK_GIANT();
			ieee80211_beacon_miss(ic);
			NET_UNLOCK_GIANT();
		} else
			sc->sc_stats.ast_bmiss_phantom++;
	}
}

/*
 * Convert net80211 channel to a HAL channel with the flags
 * constrained to reflect the current operating mode and
 * the frequency possibly mapped for GSM channels.
 */
static void
ath_mapchan(struct ieee80211com *ic, HAL_CHANNEL *hc,
	const struct ieee80211_channel *chan)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	/* NB: indexed by enum ieee80211_phymode; 0 entries are invalid */
	static const u_int modeflags[] = {
		0,			/* IEEE80211_MODE_AUTO */
		CHANNEL_A,		/* IEEE80211_MODE_11A */
		CHANNEL_B,		/* IEEE80211_MODE_11B */
		CHANNEL_PUREG,		/* IEEE80211_MODE_11G */
		0,			/* IEEE80211_MODE_FH */
		CHANNEL_ST,		/* IEEE80211_MODE_TURBO_A */
		CHANNEL_108G		/* IEEE80211_MODE_TURBO_G */
	};
	enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan);

	KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode));
	KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode));
	hc->channelFlags = modeflags[mode];
	if (IEEE80211_IS_CHAN_HALF(chan))
		hc->channelFlags |= CHANNEL_HALF;
	if (IEEE80211_IS_CHAN_QUARTER(chan))
		hc->channelFlags |= CHANNEL_QUARTER;

	/*
	 * GSM channels are mirrored into the 2.4GHz range for the hal:
	 * e.g. 922 MHz -> 2422 (presumably so the hal's channel logic
	 * applies unchanged) -- TODO confirm against hal documentation.
	 */
	hc->channel = IEEE80211_IS_CHAN_GSM(chan) ?
		2422 + (922 - chan->ic_freq) : chan->ic_freq;
#undef N
}

/*
 * (Re)initialize the hardware under the ATH lock: stop any prior
 * activity, reset the chip for the current channel/opmode, restart
 * the receive engine, program the interrupt mask, and kick the
 * 802.11 state machine as appropriate for the operating mode.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_mapchan(ic, &sc->sc_curchan, ic->ic_curchan);
	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		goto done;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);
	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_calinterval = 1;
	sc->sc_caltries = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		goto done;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;
	ath_hal_intrset(ah, sc->sc_imask);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ic->ic_state = IEEE80211_S_INIT;

	/*
	 * The hardware should be ready to go now so it's safe
	 * to kick the 802.11 state machine as it's likely to
	 * immediately call back to us to send mgmt frames.
	 */
	ath_chan_change(sc, ic->ic_curchan);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
	} else
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
done:
	ATH_UNLOCK(sc);
}

/*
 * Stop the interface; the ATH lock must be held.  Hardware is
 * only touched when it is still present (!sc_invalid).
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ifp->if_timer = 0;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				/* restore LED to its idle (off) state */
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		IFQ_DRV_PURGE(&ifp->if_snd);
		ath_beacon_free(sc);
	}
}

/*
 * Stop the interface and, when the hardware is still present,
 * put the chip into full sleep to minimize power use.
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	if (!sc->sc_invalid) {
		/*
		 * Set the chip in full sleep mode.  Note that we are
		 * careful to do this only when bringing the interface
		 * completely to a stop.  When the chip is in this state
		 * it must be carefully woken up or references to
		 * registers in the PCI clock domain may freeze the bus
		 * (and system).  This varies by chip and is mostly an
		 * issue with newer parts that go to sleep more quickly.
		 */
		ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
	}
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
static int
ath_reset(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Convert to a HAL channel description with the flags
	 * constrained to reflect the current operating mode.
	 */
	ath_mapchan(ic, &sc->sc_curchan, ic->ic_curchan);

	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	ath_update_txpow(sc);		/* update tx power state */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_calinterval = 1;
	sc->sc_caltries = 0;
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	if (ic->ic_state == IEEE80211_S_RUN)
		ath_beacon_config(sc);	/* restart beacons */
	ath_hal_intrset(ah, sc->sc_imask);

	ath_start(ifp);			/* restart xmit */
	return 0;
}

/*
 * Transmit dispatch: pull frames off the mgmt queue (priority)
 * and the interface send queue, encapsulate as needed, and hand
 * them to ath_tx_start until we run out of frames or tx buffers.
 * NOTE(review): presumably installed as ifp->if_start -- confirm
 * at the attach code (outside this view).
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ieee80211_frame *wh;
	struct ether_header *eh;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		ATH_TXBUF_LOCK(sc);
		bf = STAILQ_FIRST(&sc->sc_txbuf);
		if (bf != NULL)
			STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
				__func__);
			sc->sc_stats.ast_tx_qstop++;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		/*
		 * Poll the management queue for frames; they
		 * have priority over normal data frames.
		 */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m == NULL) {
			/*
			 * No data frames go out unless we're associated.
			 */
			if (ic->ic_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: discard data packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ic->ic_state]);
				sc->sc_stats.ast_tx_discard++;
				ATH_TXBUF_LOCK(sc);
				STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				ATH_TXBUF_UNLOCK(sc);
				break;
			}
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);	/* XXX: LOCK */
			if (m == NULL) {
				/* nothing queued; return the tx buffer */
				ATH_TXBUF_LOCK(sc);
				STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				ATH_TXBUF_UNLOCK(sc);
				break;
			}
			/*
			 * Find the node for the destination so we can do
			 * things like power save and fast frames aggregation.
			 */
			if (m->m_len < sizeof(struct ether_header) &&
			    (m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
				/* NB: m_pullup frees the mbuf on failure */
				ic->ic_stats.is_tx_nobuf++;	/* XXX */
				ni = NULL;
				goto bad;
			}
			eh = mtod(m, struct ether_header *);
			ni = ieee80211_find_txnode(ic, eh->ether_dhost);
			if (ni == NULL) {
				/* NB: ieee80211_find_txnode does stat+msg */
				m_freem(m);
				goto bad;
			}
			if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
			    (m->m_flags & M_PWR_SAV) == 0) {
				/*
				 * Station in power save mode; pass the frame
				 * to the 802.11 layer and continue.  We'll get
				 * the frame back when the time is right.
				 */
				ieee80211_pwrsave(ic, ni, m);
				goto reclaim;
			}
			/* calculate priority so we can find the tx queue */
			if (ieee80211_classify(ic, m, ni)) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
					"%s: discard, classification failure\n",
					__func__);
				m_freem(m);
				goto bad;
			}
			ifp->if_opackets++;
			BPF_MTAP(ifp, m);
			/*
			 * Encapsulate the packet in prep for transmission.
			 */
			m = ieee80211_encap(ic, m, ni);
			if (m == NULL) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
					"%s: encapsulation failure\n",
					__func__);
				sc->sc_stats.ast_tx_encap++;
				goto bad;
			}
		} else {
			/*
			 * Hack!  The referenced node pointer is in the
			 * rcvif field of the packet header.  This is
			 * placed there by ieee80211_mgmt_output because
			 * we need to hold the reference with the frame
			 * and there's no other way (other than packet
			 * tags which we consider too expensive to use)
			 * to pass it along.
			 */
			ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
			m->m_pkthdr.rcvif = NULL;

			wh = mtod(m, struct ieee80211_frame *);
			if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
			    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
				/* fill time stamp */
				u_int64_t tsf;
				u_int32_t *tstamp;

				tsf = ath_hal_gettsf64(ah);
				/* XXX: adjust 100us delay to xmit */
				tsf += 100;
				/* NB: timestamp field follows the 802.11 header */
				tstamp = (u_int32_t *)&wh[1];
				tstamp[0] = htole32(tsf & 0xffffffff);
				tstamp[1] = htole32(tsf >> 32);
			}
			sc->sc_stats.ast_tx_mgmt++;
		}

		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/* return the tx buffer and drop any node reference */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}

		sc->sc_tx_timer = 5;
		ifp->if_timer = 1;
	}
}

/*
 * Handle a media change request from the 802.11 layer: when it
 * reports ENETRESET, recalculate the hal opmode and re-init the
 * device if the interface is up.
 */
static int
ath_media_change(struct ifnet *ifp)
{
#define	IS_UP(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	int error;

	error = ieee80211_media_change(ifp);
	if (error == ENETRESET) {
		struct ath_softc *sc = ifp->if_softc;
		struct ieee80211com *ic = &sc->sc_ic;

		if (ic->ic_opmode == IEEE80211_M_AHDEMO) {
			/*
			 * Adhoc demo mode is just ibss mode w/o beacons
			 * (mostly).  The hal knows nothing about it;
			 * tell it we're operating in ibss mode.
			 */
			sc->sc_opmode = HAL_M_IBSS;
		} else
			sc->sc_opmode = ic->ic_opmode;
		if (IS_UP(ifp))
			ath_init(ifp->if_softc);	/* XXX lose error */
		error = 0;
	}
	return error;
#undef IS_UP
}

#ifdef ATH_DEBUG
/*
 * Debug: print the contents of a hal key cache entry
 * (key bytes, mac, and TKIP MIC key material).
 */
static void
ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	/* NB: indexed by HAL_CIPHER type */
	static const char *ciphers[] = {
		"WEP",
		"AES-OCB",
		"AES-CCM",
		"CKIP",
		"TKIP",
		"CLR",
	};
	int i, n;

	printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
	for (i = 0, n = hk->kv_len; i < n; i++)
		printf("%02x", hk->kv_val[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->kv_type == HAL_CIPHER_TKIP) {
		printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
		for (i = 0; i < sizeof(hk->kv_mic); i++)
			printf("%02x", hk->kv_mic[i]);
#if HAL_ABI_VERSION > 0x06052200
		if (!sc->sc_splitmic) {
			printf(" txmic ");
			for (i = 0; i < sizeof(hk->kv_txmic); i++)
				printf("%02x", hk->kv_txmic[i]);
		}
#endif
	}
	printf("\n");
}
#endif

/*
 * Set a TKIP key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP.
 */
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		if (sc->sc_splitmic) {
			/*
			 * TX key goes at first index, RX key at the rx index.
			 * The hal handles the MIC keys at index+64.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
				return 0;

			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
			/* XXX delete tx key on failure? */
			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
		} else {
			/*
			 * Room for both TX+RX MIC keys in one key cache
			 * slot, just set key at the first index; the hal
			 * will handle the rest.
			 */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
#if HAL_ABI_VERSION > 0x06052200
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
#endif
			KEYPRINTF(sc, k->wk_keyix, hk, mac);
			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
		}
	} else if (k->wk_flags & IEEE80211_KEY_XR) {
		/*
		 * TX/RX key goes at first index.
		 * The hal handles the MIC keys at index+64.
		 */
		memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ?
			k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	}
	/* NB: key has neither XMIT nor RECV set; nothing to install */
	return 0;
#undef IEEE80211_KEY_XR
}

/*
 * Set a net80211 key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP with hardware MIC support.
 */
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	const u_int8_t mac0[IEEE80211_ADDR_LEN],
	struct ieee80211_node *bss)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: indexed by net80211 cipher number */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int8_t gmac[IEEE80211_ADDR_LEN];
	const u_int8_t *mac;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache is maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
		/*
		 * Group keys on hardware that supports multicast frame
		 * key search use a mac that is the sender's address with
		 * the high bit set instead of the app-specified address.
		 */
		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
		gmac[0] |= 0x80;
		mac = gmac;
	} else
		mac = mac0;

	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}

/*
 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static u_int16_t
key_alloc_2pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	/* NB: only scan the first quarter; slots +32/+64/+32+64 pair with it */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix+32) ||
			    isset(sc->sc_keymap, keyix+64) ||
			    isset(sc->sc_keymap, keyix+32+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: re-enter the shift loop to skip this slot */
				goto again;
			}
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			setbit(sc->sc_keymap, keyix+32);
			setbit(sc->sc_keymap, keyix+32+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix+64,
				keyix+32, keyix+32+64);
			*txkeyix = keyix;
			*rxkeyix = keyix+32;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate tx/rx key slots for TKIP.
 * We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static u_int16_t
key_alloc_pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(!sc->sc_splitmic, ("key cache split"));
	/* XXX could optimize */
	/* NB: only scan the first quarter; the MIC slot at +64 pairs with it */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			if (isset(sc->sc_keymap, keyix+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: re-enter the shift loop to skip this slot */
				goto again;
			}
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u\n",
				__func__, keyix, keyix+64);
			*txkeyix = *rxkeyix = keyix;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate a single key cache slot.
 */
static int
key_alloc_single(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < N(sc->sc_keymap); i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots are free.
			 */
			keyix = i*NBBY;
			while (b & 1)
				keyix++, b >>= 1;
			setbit(sc->sc_keymap, keyix);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
				__func__, keyix);
			*txkeyix = *rxkeyix = keyix;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate one or more key cache slots for a unicast key.  The
 * key itself is needed only to identify the cipher.  For hardware
 * TKIP with split cipher+MIC keys we allocate two key cache slot
 * pairs so that we can setup separate TX and RX MIC keys.  Note
 * that the MIC key for a TKIP key at slot i is assumed by the
 * hardware to be at slot i+64.  This limits TKIP keys to the first
 * 64 entries.
 */
static int
ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Group key allocation must be handled specially for
	 * parts that do not support multicast key cache search
	 * functionality.  For those parts the key id must match
	 * the h/w key index so lookups find the right key.  On
	 * parts w/ the key search facility we install the sender's
	 * mac address (with the high bit set) and let the hardware
	 * find the key w/o using the key id.  This is preferred as
	 * it permits us to support multiple users for adhoc and/or
	 * multi-station operation.
	 */
	if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) {
		if (!(&ic->ic_nw_keys[0] <= k &&
		      k < &ic->ic_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/*
		 * XXX we pre-allocate the global keys so
		 * have no way to check if they've already been allocated.
		 */
		*keyix = *rxkeyix = k - ic->ic_nw_keys;
		return 1;
	}

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC.  For everything else, including software crypto,
	 * we allocate a single entry.  Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212.  The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return key_alloc_single(sc, keyix, rxkeyix);
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		if (sc->sc_splitmic)
			return key_alloc_2pair(sc, keyix, rxkeyix);
		else
			return key_alloc_pair(sc, keyix, rxkeyix);
	} else {
		return key_alloc_single(sc, keyix, rxkeyix);
	}
}

/*
 * Delete an entry in the key cache allocated by ath_key_alloc.
 */
static int
ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int keyix = k->wk_keyix;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);

	ath_hal_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		ath_hal_keyreset(ah, keyix+32);		/* RX key */
	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clrbit(sc->sc_keymap, keyix);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
			if (sc->sc_splitmic) {
				/* +32 for RX key, +32+64 for RX key MIC */
				clrbit(sc->sc_keymap, keyix+32);
				clrbit(sc->sc_keymap, keyix+32+64);
			}
		}
	}
	return 1;
}

/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by ath_key_alloc.
 */
static int
ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return ath_keyset(sc, k, mac, ic->ic_bss);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
#if 0
	tasklet_disable(&sc->sc_rxtq);
#endif
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

static void
ath_key_update_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
#if 0
	tasklet_enable(&sc->sc_rxtq);
#endif
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 * o accept control frames:
 *   - when in monitor mode
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc, enum ieee80211_state state)
{
#define	RX_FILTER_PRESERVE	(HAL_RX_FILTER_PHYERR | HAL_RX_FILTER_PHYRADAR)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t rfilt;

	rfilt = (ath_hal_getrxfilter(ah) & RX_FILTER_PRESERVE)
	      | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
	    (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    state == IEEE80211_S_SCAN)
		rfilt |= HAL_RX_FILTER_BEACON;
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;
	return rfilt;
#undef RX_FILTER_PRESERVE
}

/*
 * Reprogram hardware state to match the current interface
 * configuration: rx filter, operating mode, mac address, and
 * the multicast hash filter.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t rfilt, mfilt[2], val;
	u_int8_t pos;
	struct ifmultiaddr *ifma;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc, ic->ic_state);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/*
	 * Handle any link-level address change.  Note that we only
	 * need to force ic_myaddr; any other addresses are handled
	 * as a byproduct of the ifnet code marking the interface
	 * down then up.
	 *
	 * XXX should get from lladdr instead of arpcom but that's more work
	 */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp));
	ath_hal_setmac(ah, ic->ic_myaddr);

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		mfilt[0] = mfilt[1] = 0;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;
			/* NB: 64-bit hardware hash split across two words */
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		IF_ADDR_UNLOCK(ifp);
	} else {
		/* accept all multicast */
		mfilt[0] = mfilt[1] = ~0;
	}
	ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

/*
 * Set the slot time based on the current setting.
 */
static void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);

	ath_hal_setslottime(ah, usec);
	sc->sc_updateslot = OK;
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
		sc->sc_updateslot = UPDATE;
	else
		ath_setslottime(sc);
}

/*
 * Setup a h/w transmit queue for beacons.
 */
static int
ath_beaconq_setup(struct ath_hal *ah)
{
	HAL_TXQ_INFO qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/* NB: for dynamic turbo, don't enable any other interrupts */
	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}

/*
 * Setup the transmit queue parameters for the beacon queue.
 */
static int
ath_beaconq_config(struct ath_softc *sc)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/*
		 * Always burst out beacon and CAB traffic.
		 */
		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
	} else {
		struct wmeParams *wmep =
			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
		/*
		 * Adhoc mode; important thing is to use 2x cwmin.
1952 */ 1953 qi.tqi_aifs = wmep->wmep_aifsn; 1954 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 1955 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 1956 } 1957 1958 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { 1959 device_printf(sc->sc_dev, "unable to update parameters for " 1960 "beacon hardware queue!\n"); 1961 return 0; 1962 } else { 1963 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ 1964 return 1; 1965 } 1966 #undef ATH_EXPONENT_TO_VALUE 1967 } 1968 1969 /* 1970 * Allocate and setup an initial beacon frame. 1971 */ 1972 static int 1973 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1974 { 1975 struct ieee80211com *ic = ni->ni_ic; 1976 struct ath_buf *bf; 1977 struct mbuf *m; 1978 int error; 1979 1980 bf = STAILQ_FIRST(&sc->sc_bbuf); 1981 if (bf == NULL) { 1982 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n", __func__); 1983 sc->sc_stats.ast_be_nombuf++; /* XXX */ 1984 return ENOMEM; /* XXX */ 1985 } 1986 /* 1987 * NB: the beacon data buffer must be 32-bit aligned; 1988 * we assume the mbuf routines will return us something 1989 * with this alignment (perhaps should assert). 1990 */ 1991 m = ieee80211_beacon_alloc(ic, ni, &sc->sc_boff); 1992 if (m == NULL) { 1993 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n", 1994 __func__); 1995 sc->sc_stats.ast_be_nombuf++; 1996 return ENOMEM; 1997 } 1998 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 1999 bf->bf_segs, &bf->bf_nseg, 2000 BUS_DMA_NOWAIT); 2001 if (error == 0) { 2002 bf->bf_m = m; 2003 bf->bf_node = ieee80211_ref_node(ni); 2004 } else { 2005 m_freem(m); 2006 } 2007 return error; 2008 } 2009 2010 /* 2011 * Setup the beacon frame for transmit. 
 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
/* true when short preamble is enabled and Barker is not required */
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	/* beacons are never acknowledged */
	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antenna
		 * (bit 2 of the beacon-xmit count toggles every 4 frames)
		 */
		antenna = sc->sc_txantenna != 0 ? sc->sc_txantenna
			: (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = sc->sc_minrateix;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)	/* buffer length */
		, AH_TRUE		/* first segment */
		, AH_TRUE		/* last segment */
		, ds			/* first descriptor */
	);
#undef USE_SHPREAMBLE
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 * (src is left empty: link pointer cleared, depth zeroed.)
 */
static void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{
	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	src->axq_depth = 0;
}

/*
 * Transmit a beacon frame at SWBA. Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
2104 */ 2105 static void 2106 ath_beacon_proc(void *arg, int pending) 2107 { 2108 struct ath_softc *sc = arg; 2109 struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf); 2110 struct ieee80211_node *ni = bf->bf_node; 2111 struct ieee80211com *ic = ni->ni_ic; 2112 struct ath_hal *ah = sc->sc_ah; 2113 struct ath_txq *cabq = sc->sc_cabq; 2114 struct mbuf *m; 2115 int ncabq, nmcastq, error, otherant; 2116 2117 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 2118 __func__, pending); 2119 2120 if (ic->ic_opmode == IEEE80211_M_STA || 2121 ic->ic_opmode == IEEE80211_M_MONITOR || 2122 bf == NULL || bf->bf_m == NULL) { 2123 DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n", 2124 __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL); 2125 return; 2126 } 2127 /* 2128 * Check if the previous beacon has gone out. If 2129 * not don't try to post another, skip this period 2130 * and wait for the next. Missed beacons indicate 2131 * a problem and should not occur. If we miss too 2132 * many consecutive beacons reset the device. 2133 */ 2134 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 2135 sc->sc_bmisscount++; 2136 DPRINTF(sc, ATH_DEBUG_BEACON, 2137 "%s: missed %u consecutive beacons\n", 2138 __func__, sc->sc_bmisscount); 2139 if (sc->sc_bmisscount > 3) /* NB: 3 is a guess */ 2140 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 2141 return; 2142 } 2143 if (sc->sc_bmisscount != 0) { 2144 DPRINTF(sc, ATH_DEBUG_BEACON, 2145 "%s: resume beacon xmit after %u misses\n", 2146 __func__, sc->sc_bmisscount); 2147 sc->sc_bmisscount = 0; 2148 } 2149 2150 /* 2151 * Update dynamic beacon contents. If this returns 2152 * non-zero then we need to remap the memory because 2153 * the beacon frame changed size (probably because 2154 * of the TIM bitmap). 
2155 */ 2156 m = bf->bf_m; 2157 nmcastq = sc->sc_mcastq.axq_depth; 2158 ncabq = ath_hal_numtxpending(ah, cabq->axq_qnum); 2159 if (ieee80211_beacon_update(ic, bf->bf_node, &sc->sc_boff, m, ncabq+nmcastq)) { 2160 /* XXX too conservative? */ 2161 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2162 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 2163 bf->bf_segs, &bf->bf_nseg, 2164 BUS_DMA_NOWAIT); 2165 if (error != 0) { 2166 if_printf(ic->ic_ifp, 2167 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 2168 __func__, error); 2169 return; 2170 } 2171 } 2172 if (ncabq && (sc->sc_boff.bo_tim[4] & 1)) { 2173 /* 2174 * CABQ traffic from the previous DTIM is still pending. 2175 * This is ok for now but when there are multiple vap's 2176 * and we are using staggered beacons we'll want to drain 2177 * the cabq before loading frames for the different vap. 2178 */ 2179 DPRINTF(sc, ATH_DEBUG_BEACON, 2180 "%s: cabq did not drain, mcastq %u cabq %u/%u\n", 2181 __func__, nmcastq, ncabq, cabq->axq_depth); 2182 sc->sc_stats.ast_cabq_busy++; 2183 } 2184 2185 /* 2186 * Handle slot time change when a non-ERP station joins/leaves 2187 * an 11g network. The 802.11 layer notifies us via callback, 2188 * we mark updateslot, then wait one beacon before effecting 2189 * the change. This gives associated stations at least one 2190 * beacon interval to note the state change. 2191 */ 2192 /* XXX locking */ 2193 if (sc->sc_updateslot == UPDATE) 2194 sc->sc_updateslot = COMMIT; /* commit next beacon */ 2195 else if (sc->sc_updateslot == COMMIT) 2196 ath_setslottime(sc); /* commit change to h/w */ 2197 2198 /* 2199 * Check recent per-antenna transmit statistics and flip 2200 * the default antenna if noticeably more frames went out 2201 * on the non-default antenna. 2202 * XXX assumes 2 anntenae 2203 */ 2204 otherant = sc->sc_defant & 1 ? 
2 : 1; 2205 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 2206 ath_setdefantenna(sc, otherant); 2207 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 2208 2209 /* 2210 * Construct tx descriptor. 2211 */ 2212 ath_beacon_setup(sc, bf); 2213 2214 /* 2215 * Stop any current dma and put the new frame on the queue. 2216 * This should never fail since we check above that no frames 2217 * are still pending on the queue. 2218 */ 2219 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 2220 DPRINTF(sc, ATH_DEBUG_ANY, 2221 "%s: beacon queue %u did not stop?\n", 2222 __func__, sc->sc_bhalq); 2223 } 2224 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 2225 2226 /* 2227 * Enable the CAB queue before the beacon queue to 2228 * insure cab frames are triggered by this beacon. 2229 */ 2230 if (sc->sc_boff.bo_tim_len && (sc->sc_boff.bo_tim[4] & 1)) { 2231 /* NB: only at DTIM */ 2232 ATH_TXQ_LOCK(cabq); 2233 ATH_TXQ_LOCK(&sc->sc_mcastq); 2234 if (nmcastq) { 2235 struct ath_buf *bfm; 2236 2237 /* 2238 * Move frames from the s/w mcast q to the h/w cab q. 2239 */ 2240 bfm = STAILQ_FIRST(&sc->sc_mcastq.axq_q); 2241 if (cabq->axq_link != NULL) { 2242 *cabq->axq_link = bfm->bf_daddr; 2243 } else 2244 ath_hal_puttxbuf(ah, cabq->axq_qnum, 2245 bfm->bf_daddr); 2246 ath_txqmove(cabq, &sc->sc_mcastq); 2247 2248 sc->sc_stats.ast_cabq_xmit += nmcastq; 2249 } 2250 /* NB: gated by beacon so safe to start here */ 2251 ath_hal_txstart(ah, cabq->axq_qnum); 2252 ATH_TXQ_UNLOCK(cabq); 2253 ATH_TXQ_UNLOCK(&sc->sc_mcastq); 2254 } 2255 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 2256 ath_hal_txstart(ah, sc->sc_bhalq); 2257 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, 2258 "%s: TXDP[%u] = %p (%p)\n", __func__, 2259 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc); 2260 2261 sc->sc_stats.ast_be_xmit++; 2262 } 2263 2264 /* 2265 * Reset the hardware after detecting beacons have stopped. 
2266 */ 2267 static void 2268 ath_bstuck_proc(void *arg, int pending) 2269 { 2270 struct ath_softc *sc = arg; 2271 struct ifnet *ifp = sc->sc_ifp; 2272 2273 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 2274 sc->sc_bmisscount); 2275 ath_reset(ifp); 2276 } 2277 2278 /* 2279 * Reclaim beacon resources. 2280 */ 2281 static void 2282 ath_beacon_free(struct ath_softc *sc) 2283 { 2284 struct ath_buf *bf; 2285 2286 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { 2287 if (bf->bf_m != NULL) { 2288 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2289 m_freem(bf->bf_m); 2290 bf->bf_m = NULL; 2291 } 2292 if (bf->bf_node != NULL) { 2293 ieee80211_free_node(bf->bf_node); 2294 bf->bf_node = NULL; 2295 } 2296 } 2297 } 2298 2299 /* 2300 * Configure the beacon and sleep timers. 2301 * 2302 * When operating as an AP this resets the TSF and sets 2303 * up the hardware to notify us when we need to issue beacons. 2304 * 2305 * When operating in station mode this sets up the beacon 2306 * timers according to the timestamp of the last received 2307 * beacon and the current TSF, configures PCF and DTIM 2308 * handling, programs the sleep registers so the hardware 2309 * will wakeup in time to receive beacons, and configures 2310 * the beacon miss handling so we'll receive a BMISS 2311 * interrupt when we stop seeing beacons from the AP 2312 * we've associated with. 
 */
static void
ath_beacon_config(struct ath_softc *sc)
{
/* convert a 64-bit TSF (high/low halves) to 1024us time units */
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
/* TU slop added to the current TSF when advancing nexttbtt */
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	/* NB: the beacon interval is kept internally in TU's */
	intval = ni->ni_intval & HAL_BEACON_PERIOD;
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA) {
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.  The configuration
		 * is specified in ms, so we need to convert that to
		 * TU's and then calculate based on the beacon interval.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = ic->ic_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON, 
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* program the timers with interrupts blocked */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
2465 */ 2466 intval |= HAL_BEACON_ENA; 2467 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 2468 ath_beaconq_config(sc); 2469 } 2470 ath_hal_beaconinit(ah, nexttbtt, intval); 2471 sc->sc_bmisscount = 0; 2472 ath_hal_intrset(ah, sc->sc_imask); 2473 /* 2474 * When using a self-linked beacon descriptor in 2475 * ibss mode load it once here. 2476 */ 2477 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 2478 ath_beacon_proc(sc, 0); 2479 } 2480 sc->sc_syncbeacon = 0; 2481 #undef FUDGE 2482 #undef TSF_TO_TU 2483 } 2484 2485 static void 2486 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2487 { 2488 bus_addr_t *paddr = (bus_addr_t*) arg; 2489 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2490 *paddr = segs->ds_addr; 2491 } 2492 2493 static int 2494 ath_descdma_setup(struct ath_softc *sc, 2495 struct ath_descdma *dd, ath_bufhead *head, 2496 const char *name, int nbuf, int ndesc) 2497 { 2498 #define DS2PHYS(_dd, _ds) \ 2499 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2500 struct ifnet *ifp = sc->sc_ifp; 2501 struct ath_desc *ds; 2502 struct ath_buf *bf; 2503 int i, bsize, error; 2504 2505 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 2506 __func__, name, nbuf, ndesc); 2507 2508 dd->dd_name = name; 2509 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 2510 2511 /* 2512 * Setup DMA descriptor area. 
2513 */ 2514 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2515 PAGE_SIZE, 0, /* alignment, bounds */ 2516 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2517 BUS_SPACE_MAXADDR, /* highaddr */ 2518 NULL, NULL, /* filter, filterarg */ 2519 dd->dd_desc_len, /* maxsize */ 2520 1, /* nsegments */ 2521 dd->dd_desc_len, /* maxsegsize */ 2522 BUS_DMA_ALLOCNOW, /* flags */ 2523 NULL, /* lockfunc */ 2524 NULL, /* lockarg */ 2525 &dd->dd_dmat); 2526 if (error != 0) { 2527 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2528 return error; 2529 } 2530 2531 /* allocate descriptors */ 2532 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2533 if (error != 0) { 2534 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2535 "error %u\n", dd->dd_name, error); 2536 goto fail0; 2537 } 2538 2539 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2540 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2541 &dd->dd_dmamap); 2542 if (error != 0) { 2543 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2544 "error %u\n", nbuf * ndesc, dd->dd_name, error); 2545 goto fail1; 2546 } 2547 2548 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2549 dd->dd_desc, dd->dd_desc_len, 2550 ath_load_cb, &dd->dd_desc_paddr, 2551 BUS_DMA_NOWAIT); 2552 if (error != 0) { 2553 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2554 dd->dd_name, error); 2555 goto fail2; 2556 } 2557 2558 ds = dd->dd_desc; 2559 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2560 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2561 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2562 2563 /* allocate rx buffers */ 2564 bsize = sizeof(struct ath_buf) * nbuf; 2565 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2566 if (bf == NULL) { 2567 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2568 dd->dd_name, bsize); 2569 goto fail3; 2570 } 2571 dd->dd_bufptr = bf; 2572 2573 STAILQ_INIT(head); 2574 for (i 
= 0; i < nbuf; i++, bf++, ds += ndesc) { 2575 bf->bf_desc = ds; 2576 bf->bf_daddr = DS2PHYS(dd, ds); 2577 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2578 &bf->bf_dmamap); 2579 if (error != 0) { 2580 if_printf(ifp, "unable to create dmamap for %s " 2581 "buffer %u, error %u\n", dd->dd_name, i, error); 2582 ath_descdma_cleanup(sc, dd, head); 2583 return error; 2584 } 2585 STAILQ_INSERT_TAIL(head, bf, bf_list); 2586 } 2587 return 0; 2588 fail3: 2589 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2590 fail2: 2591 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2592 fail1: 2593 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2594 fail0: 2595 bus_dma_tag_destroy(dd->dd_dmat); 2596 memset(dd, 0, sizeof(*dd)); 2597 return error; 2598 #undef DS2PHYS 2599 } 2600 2601 static void 2602 ath_descdma_cleanup(struct ath_softc *sc, 2603 struct ath_descdma *dd, ath_bufhead *head) 2604 { 2605 struct ath_buf *bf; 2606 struct ieee80211_node *ni; 2607 2608 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2609 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2610 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2611 bus_dma_tag_destroy(dd->dd_dmat); 2612 2613 STAILQ_FOREACH(bf, head, bf_list) { 2614 if (bf->bf_m) { 2615 m_freem(bf->bf_m); 2616 bf->bf_m = NULL; 2617 } 2618 if (bf->bf_dmamap != NULL) { 2619 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2620 bf->bf_dmamap = NULL; 2621 } 2622 ni = bf->bf_node; 2623 bf->bf_node = NULL; 2624 if (ni != NULL) { 2625 /* 2626 * Reclaim node reference. 
 */
			ieee80211_free_node(ni);
		}
	}

	STAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}

/*
 * Allocate the rx, tx, and beacon descriptor/buffer sets.
 * Returns 0 on success or an errno; on failure any earlier
 * allocations are released so the softc is left clean.
 */
static int
ath_desc_alloc(struct ath_softc *sc)
{
	int error;

	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
			"rx", ath_rxbuf, 1);
	if (error != 0)
		return error;

	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
			"tx", ath_txbuf, ATH_TXDESC);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
			"beacon", 1, 1);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}
	return 0;
}

/*
 * Release all descriptor sets; dd_desc_len != 0 is used as the
 * "was allocated" marker (ath_descdma_cleanup zeroes the struct).
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * net80211 node-allocation hook: allocate an ath_node with extra
 * trailing space for the rate-control module's per-node state.
 * Returns the embedded ieee80211_node, or NULL if memory is short.
 */
static struct ieee80211_node *
ath_node_alloc(struct ieee80211_node_table *nt)
{
	struct ieee80211com *ic = nt->nt_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
	struct ath_node *an;

	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
	if (an == NULL) {
		/* XXX stat+msg */
		return NULL;
	}
	/* no rssi samples yet; ath_node_getrssi falls back to ni_rssi */
	an->an_avgrssi = ATH_RSSI_DUMMY_MARKER;
	ath_rate_node_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return &an->an_node;
}

/*
 * net80211 node-free hook: tear down rate-control state then chain
 * to the 802.11 layer's original free routine (saved in sc_node_free).
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc
*sc = ic->ic_ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);

	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_free(ni);
}

/*
 * Return the filtered rssi for a node, clamped to [0, 127].
 * Uses the driver's exponentially-averaged sample when available,
 * otherwise the last value recorded by the 802.11 layer.
 */
static u_int8_t
ath_node_getrssi(const struct ieee80211_node *ni)
{
/* round an EP-scaled value to the nearest integer */
#define	HAL_EP_RND(x, mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
	u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi;
	int32_t rssi;

	/*
	 * When only one frame is received there will be no state in
	 * avgrssi so fallback on the value recorded by the 802.11 layer.
	 */
	if (avgrssi != ATH_RSSI_DUMMY_MARKER)
		rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER);
	else
		rssi = ni->ni_rssi;
	return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi;
#undef HAL_EP_RND
}

/*
 * (Re)initialize a receive buffer: attach an mbuf cluster if the buf
 * has none, DMA-map it, and link its self-linked descriptor onto the
 * tail of the h/w rx chain.  Returns 0 on success or an errno.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* append to the chain tracked via sc_rxlink */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the specified TSF.
 */
static __inline u_int64_t
ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
{
	/*
	 * If the low 15 bits of the TSF are already past rstamp the
	 * 15-bit counter wrapped between frame receipt and the TSF
	 * read; back the TSF up one wrap period before merging.
	 */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return ((tsf &~ 0x7fff) | rstamp);
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
static void
ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
	struct ieee80211_node *ni,
	int subtype, int rssi, u_int32_t rstamp)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, rstamp);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == ic->ic_bss && ic->ic_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc);
		}
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    ic->ic_state == IEEE80211_S_RUN) {
			u_int64_t tsf = ath_extend_tsf(rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}

/*
 * Set the default antenna.
 * Also resets the rx "other antenna" counter used by the fast
 * diversity logic in ath_rx_proc.
 */
static void
ath_setdefantenna(struct ath_softc *sc, u_int antenna)
{
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath_hal_setdefantenna(ah, antenna);
	if (sc->sc_defant != antenna)
		sc->sc_stats.ast_ant_defswitch++;
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Fill in the radiotap header from the rx status and hand the frame
 * to any attached bpf taps.  Returns 1 if the frame was delivered,
 * 0 if it was discarded as a runt.
 */
static int
ath_rx_tap(struct ath_softc *sc, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
	u_int8_t rix;

	KASSERT(sc->sc_drvbpf != NULL, ("no tap"));

	/*
	 * Discard anything shorter than an ack or cts.
	 */
	if (m->m_pkthdr.len < IEEE80211_ACK_LEN) {
		DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n",
			__func__, m->m_pkthdr.len);
		sc->sc_stats.ast_rx_tooshort++;
		return 0;
	}
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
	rix = rs->rs_rate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf;
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;

	bpf_mtap2(sc->sc_drvbpf, &sc->sc_rx_th, sc->sc_rx_th_len, m);

	return 1;
}

static void
ath_rx_proc(void *arg, int npending)
{
/* translate an rx descriptor's bus address back to its kva */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct
ath_softc *sc = arg; 2925 struct ath_buf *bf; 2926 struct ieee80211com *ic = &sc->sc_ic; 2927 struct ifnet *ifp = sc->sc_ifp; 2928 struct ath_hal *ah = sc->sc_ah; 2929 struct ath_desc *ds; 2930 struct ath_rx_status *rs; 2931 struct mbuf *m; 2932 struct ieee80211_node *ni; 2933 struct ath_node *an; 2934 int len, type, ngood; 2935 u_int phyerr; 2936 HAL_STATUS status; 2937 int16_t nf; 2938 u_int64_t tsf; 2939 2940 NET_LOCK_GIANT(); /* XXX */ 2941 2942 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 2943 ngood = 0; 2944 nf = ath_hal_getchannoise(ah, &sc->sc_curchan); 2945 tsf = ath_hal_gettsf64(ah); 2946 do { 2947 bf = STAILQ_FIRST(&sc->sc_rxbuf); 2948 if (bf == NULL) { /* NB: shouldn't happen */ 2949 if_printf(ifp, "%s: no buffer!\n", __func__); 2950 break; 2951 } 2952 m = bf->bf_m; 2953 if (m == NULL) { /* NB: shouldn't happen */ 2954 /* 2955 * If mbuf allocation failed previously there 2956 * will be no mbuf; try again to re-populate it. 2957 */ 2958 /* XXX make debug msg */ 2959 if_printf(ifp, "%s: no mbuf!\n", __func__); 2960 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); 2961 goto rx_next; 2962 } 2963 ds = bf->bf_desc; 2964 if (ds->ds_link == bf->bf_daddr) { 2965 /* NB: never process the self-linked entry at the end */ 2966 break; 2967 } 2968 /* XXX sync descriptor memory */ 2969 /* 2970 * Must provide the virtual address of the current 2971 * descriptor, the physical address, and the virtual 2972 * address of the next descriptor in the h/w chain. 2973 * This allows the HAL to look ahead to see if the 2974 * hardware is done with a descriptor by checking the 2975 * done bit in the following descriptor and the address 2976 * of the current descriptor the DMA engine is working 2977 * on. All this is necessary because of our use of 2978 * a self-linked list to avoid rx overruns. 
2979 */ 2980 rs = &bf->bf_status.ds_rxstat; 2981 status = ath_hal_rxprocdesc(ah, ds, 2982 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 2983 #ifdef ATH_DEBUG 2984 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 2985 ath_printrxbuf(bf, 0, status == HAL_OK); 2986 #endif 2987 if (status == HAL_EINPROGRESS) 2988 break; 2989 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); 2990 if (rs->rs_more) { 2991 /* 2992 * Frame spans multiple descriptors; this 2993 * cannot happen yet as we don't support 2994 * jumbograms. If not in monitor mode, 2995 * discard the frame. 2996 */ 2997 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 2998 sc->sc_stats.ast_rx_toobig++; 2999 goto rx_next; 3000 } 3001 /* fall thru for monitor mode handling... */ 3002 } else if (rs->rs_status != 0) { 3003 if (rs->rs_status & HAL_RXERR_CRC) 3004 sc->sc_stats.ast_rx_crcerr++; 3005 if (rs->rs_status & HAL_RXERR_FIFO) 3006 sc->sc_stats.ast_rx_fifoerr++; 3007 if (rs->rs_status & HAL_RXERR_PHY) { 3008 sc->sc_stats.ast_rx_phyerr++; 3009 phyerr = rs->rs_phyerr & 0x1f; 3010 sc->sc_stats.ast_rx_phy[phyerr]++; 3011 goto rx_next; 3012 } 3013 if (rs->rs_status & HAL_RXERR_DECRYPT) { 3014 /* 3015 * Decrypt error. If the error occurred 3016 * because there was no hardware key, then 3017 * let the frame through so the upper layers 3018 * can process it. This is necessary for 5210 3019 * parts which have no way to setup a ``clear'' 3020 * key cache entry. 3021 * 3022 * XXX do key cache faulting 3023 */ 3024 if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 3025 goto rx_accept; 3026 sc->sc_stats.ast_rx_badcrypt++; 3027 } 3028 if (rs->rs_status & HAL_RXERR_MIC) { 3029 sc->sc_stats.ast_rx_badmic++; 3030 /* 3031 * Do minimal work required to hand off 3032 * the 802.11 header for notifcation. 
3033 */ 3034 /* XXX frag's and qos frames */ 3035 len = rs->rs_datalen; 3036 if (len >= sizeof (struct ieee80211_frame)) { 3037 bus_dmamap_sync(sc->sc_dmat, 3038 bf->bf_dmamap, 3039 BUS_DMASYNC_POSTREAD); 3040 ieee80211_notify_michael_failure(ic, 3041 mtod(m, struct ieee80211_frame *), 3042 sc->sc_splitmic ? 3043 rs->rs_keyix-32 : rs->rs_keyix 3044 ); 3045 } 3046 } 3047 ifp->if_ierrors++; 3048 /* 3049 * When a tap is present pass error frames 3050 * that have been requested. By default we 3051 * pass decrypt+mic errors but others may be 3052 * interesting (e.g. crc). 3053 */ 3054 if (bpf_peers_present(sc->sc_drvbpf) && 3055 (rs->rs_status & sc->sc_monpass)) { 3056 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3057 BUS_DMASYNC_POSTREAD); 3058 /* NB: bpf needs the mbuf length setup */ 3059 len = rs->rs_datalen; 3060 m->m_pkthdr.len = m->m_len = len; 3061 (void) ath_rx_tap(sc, m, rs, tsf, nf); 3062 } 3063 /* XXX pass MIC errors up for s/w reclaculation */ 3064 goto rx_next; 3065 } 3066 rx_accept: 3067 /* 3068 * Sync and unmap the frame. At this point we're 3069 * committed to passing the mbuf somewhere so clear 3070 * bf_m; this means a new mbuf must be allocated 3071 * when the rx descriptor is setup again to receive 3072 * another frame. 3073 */ 3074 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3075 BUS_DMASYNC_POSTREAD); 3076 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3077 bf->bf_m = NULL; 3078 3079 m->m_pkthdr.rcvif = ifp; 3080 len = rs->rs_datalen; 3081 m->m_pkthdr.len = m->m_len = len; 3082 3083 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; 3084 3085 if (bpf_peers_present(sc->sc_drvbpf) && 3086 !ath_rx_tap(sc, m, rs, tsf, nf)) { 3087 m_freem(m); /* XXX reclaim */ 3088 goto rx_next; 3089 } 3090 3091 /* 3092 * From this point on we assume the frame is at least 3093 * as large as ieee80211_frame_min; verify that. 
3094 */ 3095 if (len < IEEE80211_MIN_LEN) { 3096 DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n", 3097 __func__, len); 3098 sc->sc_stats.ast_rx_tooshort++; 3099 m_freem(m); 3100 goto rx_next; 3101 } 3102 3103 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 3104 ieee80211_dump_pkt(mtod(m, caddr_t), len, 3105 sc->sc_hwmap[rs->rs_rate].ieeerate, 3106 rs->rs_rssi); 3107 } 3108 3109 m_adj(m, -IEEE80211_CRC_LEN); 3110 3111 /* 3112 * Locate the node for sender, track state, and then 3113 * pass the (referenced) node up to the 802.11 layer 3114 * for its use. 3115 */ 3116 ni = ieee80211_find_rxnode_withkey(ic, 3117 mtod(m, const struct ieee80211_frame_min *), 3118 rs->rs_keyix == HAL_RXKEYIX_INVALID ? 3119 IEEE80211_KEYIX_NONE : rs->rs_keyix); 3120 /* 3121 * Track rx rssi and do any rx antenna management. 3122 */ 3123 an = ATH_NODE(ni); 3124 ATH_RSSI_LPF(an->an_avgrssi, rs->rs_rssi); 3125 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 3126 /* 3127 * Send frame up for processing. 3128 */ 3129 type = ieee80211_input(ic, m, ni, rs->rs_rssi, rs->rs_tstamp); 3130 ieee80211_free_node(ni); 3131 if (sc->sc_diversity) { 3132 /* 3133 * When using fast diversity, change the default rx 3134 * antenna if diversity chooses the other antenna 3 3135 * times in a row. 3136 */ 3137 if (sc->sc_defant != rs->rs_antenna) { 3138 if (++sc->sc_rxotherant >= 3) 3139 ath_setdefantenna(sc, rs->rs_antenna); 3140 } else 3141 sc->sc_rxotherant = 0; 3142 } 3143 if (sc->sc_softled) { 3144 /* 3145 * Blink for any data frame. Otherwise do a 3146 * heartbeat-style blink when idle. The latter 3147 * is mainly for station mode where we depend on 3148 * periodic beacon frames to trigger the poll event. 
3149 */ 3150 if (type == IEEE80211_FC0_TYPE_DATA) { 3151 sc->sc_rxrate = rs->rs_rate; 3152 ath_led_event(sc, ATH_LED_RX); 3153 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 3154 ath_led_event(sc, ATH_LED_POLL); 3155 } 3156 /* 3157 * Arrange to update the last rx timestamp only for 3158 * frames from our ap when operating in station mode. 3159 * This assumes the rx key is always setup when associated. 3160 */ 3161 if (ic->ic_opmode == IEEE80211_M_STA && 3162 rs->rs_keyix != HAL_RXKEYIX_INVALID) 3163 ngood++; 3164 rx_next: 3165 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 3166 } while (ath_rxbuf_init(sc, bf) == 0); 3167 3168 /* rx signal state monitoring */ 3169 ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan); 3170 if (ngood) 3171 sc->sc_lastrx = tsf; 3172 3173 /* NB: may want to check mgtq too */ 3174 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && 3175 !IFQ_IS_EMPTY(&ifp->if_snd)) 3176 ath_start(ifp); 3177 3178 NET_UNLOCK_GIANT(); /* XXX */ 3179 #undef PA2DESC 3180 } 3181 3182 static void 3183 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3184 { 3185 txq->axq_qnum = qnum; 3186 txq->axq_depth = 0; 3187 txq->axq_intrcnt = 0; 3188 txq->axq_link = NULL; 3189 STAILQ_INIT(&txq->axq_q); 3190 ATH_TXQ_LOCK_INIT(sc, txq); 3191 } 3192 3193 /* 3194 * Setup a h/w transmit queue. 3195 */ 3196 static struct ath_txq * 3197 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3198 { 3199 #define N(a) (sizeof(a)/sizeof(a[0])) 3200 struct ath_hal *ah = sc->sc_ah; 3201 HAL_TXQ_INFO qi; 3202 int qnum; 3203 3204 memset(&qi, 0, sizeof(qi)); 3205 qi.tqi_subtype = subtype; 3206 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3207 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3208 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3209 /* 3210 * Enable interrupts only for EOL and DESC conditions. 3211 * We mark tx descriptors to receive a DESC interrupt 3212 * when a tx queue gets deep; otherwise waiting for the 3213 * EOL to reap descriptors. 
Note that this is done to 3214 * reduce interrupt load and this only defers reaping 3215 * descriptors, never transmitting frames. Aside from 3216 * reducing interrupts this also permits more concurrency. 3217 * The only potential downside is if the tx queue backs 3218 * up in which case the top half of the kernel may backup 3219 * due to a lack of tx descriptors. 3220 */ 3221 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 3222 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3223 if (qnum == -1) { 3224 /* 3225 * NB: don't print a message, this happens 3226 * normally on parts with too few tx queues 3227 */ 3228 return NULL; 3229 } 3230 if (qnum >= N(sc->sc_txq)) { 3231 device_printf(sc->sc_dev, 3232 "hal qnum %u out of range, max %zu!\n", 3233 qnum, N(sc->sc_txq)); 3234 ath_hal_releasetxqueue(ah, qnum); 3235 return NULL; 3236 } 3237 if (!ATH_TXQ_SETUP(sc, qnum)) { 3238 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3239 sc->sc_txqsetup |= 1<<qnum; 3240 } 3241 return &sc->sc_txq[qnum]; 3242 #undef N 3243 } 3244 3245 /* 3246 * Setup a hardware data transmit queue for the specified 3247 * access control. The hal may not support all requested 3248 * queues in which case it will return a reference to a 3249 * previously setup queue. We record the mapping from ac's 3250 * to h/w queues for use by ath_tx_start and also track 3251 * the set of h/w queues being used to optimize work in the 3252 * transmit interrupt handler and related routines. 
3253 */ 3254 static int 3255 ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3256 { 3257 #define N(a) (sizeof(a)/sizeof(a[0])) 3258 struct ath_txq *txq; 3259 3260 if (ac >= N(sc->sc_ac2q)) { 3261 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3262 ac, N(sc->sc_ac2q)); 3263 return 0; 3264 } 3265 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3266 if (txq != NULL) { 3267 sc->sc_ac2q[ac] = txq; 3268 return 1; 3269 } else 3270 return 0; 3271 #undef N 3272 } 3273 3274 /* 3275 * Update WME parameters for a transmit queue. 3276 */ 3277 static int 3278 ath_txq_update(struct ath_softc *sc, int ac) 3279 { 3280 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3281 #define ATH_TXOP_TO_US(v) (v<<5) 3282 struct ieee80211com *ic = &sc->sc_ic; 3283 struct ath_txq *txq = sc->sc_ac2q[ac]; 3284 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3285 struct ath_hal *ah = sc->sc_ah; 3286 HAL_TXQ_INFO qi; 3287 3288 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3289 qi.tqi_aifs = wmep->wmep_aifsn; 3290 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3291 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3292 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 3293 3294 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3295 device_printf(sc->sc_dev, "unable to update hardware queue " 3296 "parameters for %s traffic!\n", 3297 ieee80211_wme_acnames[ac]); 3298 return 0; 3299 } else { 3300 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3301 return 1; 3302 } 3303 #undef ATH_TXOP_TO_US 3304 #undef ATH_EXPONENT_TO_VALUE 3305 } 3306 3307 /* 3308 * Callback from the 802.11 layer to update WME parameters. 3309 */ 3310 static int 3311 ath_wme_update(struct ieee80211com *ic) 3312 { 3313 struct ath_softc *sc = ic->ic_ifp->if_softc; 3314 3315 return !ath_txq_update(sc, WME_AC_BE) || 3316 !ath_txq_update(sc, WME_AC_BK) || 3317 !ath_txq_update(sc, WME_AC_VI) || 3318 !ath_txq_update(sc, WME_AC_VO) ? 
EIO : 0; 3319 } 3320 3321 /* 3322 * Reclaim resources for a setup queue. 3323 */ 3324 static void 3325 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 3326 { 3327 3328 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 3329 ATH_TXQ_LOCK_DESTROY(txq); 3330 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 3331 } 3332 3333 /* 3334 * Reclaim all tx queue resources. 3335 */ 3336 static void 3337 ath_tx_cleanup(struct ath_softc *sc) 3338 { 3339 int i; 3340 3341 ATH_TXBUF_LOCK_DESTROY(sc); 3342 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3343 if (ATH_TXQ_SETUP(sc, i)) 3344 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 3345 ATH_TXQ_LOCK_DESTROY(&sc->sc_mcastq); 3346 } 3347 3348 /* 3349 * Defragment an mbuf chain, returning at most maxfrags separate 3350 * mbufs+clusters. If this is not possible NULL is returned and 3351 * the original mbuf chain is left in it's present (potentially 3352 * modified) state. We use two techniques: collapsing consecutive 3353 * mbufs and replacing consecutive mbufs by a cluster. 3354 */ 3355 static struct mbuf * 3356 ath_defrag(struct mbuf *m0, int how, int maxfrags) 3357 { 3358 struct mbuf *m, *n, *n2, **prev; 3359 u_int curfrags; 3360 3361 /* 3362 * Calculate the current number of frags. 3363 */ 3364 curfrags = 0; 3365 for (m = m0; m != NULL; m = m->m_next) 3366 curfrags++; 3367 /* 3368 * First, try to collapse mbufs. Note that we always collapse 3369 * towards the front so we don't need to deal with moving the 3370 * pkthdr. This may be suboptimal if the first mbuf has much 3371 * less data than the following. 
3372 */ 3373 m = m0; 3374 again: 3375 for (;;) { 3376 n = m->m_next; 3377 if (n == NULL) 3378 break; 3379 if ((m->m_flags & M_RDONLY) == 0 && 3380 n->m_len < M_TRAILINGSPACE(m)) { 3381 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 3382 n->m_len); 3383 m->m_len += n->m_len; 3384 m->m_next = n->m_next; 3385 m_free(n); 3386 if (--curfrags <= maxfrags) 3387 return m0; 3388 } else 3389 m = n; 3390 } 3391 KASSERT(maxfrags > 1, 3392 ("maxfrags %u, but normal collapse failed", maxfrags)); 3393 /* 3394 * Collapse consecutive mbufs to a cluster. 3395 */ 3396 prev = &m0->m_next; /* NB: not the first mbuf */ 3397 while ((n = *prev) != NULL) { 3398 if ((n2 = n->m_next) != NULL && 3399 n->m_len + n2->m_len < MCLBYTES) { 3400 m = m_getcl(how, MT_DATA, 0); 3401 if (m == NULL) 3402 goto bad; 3403 bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 3404 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 3405 n2->m_len); 3406 m->m_len = n->m_len + n2->m_len; 3407 m->m_next = n2->m_next; 3408 *prev = m; 3409 m_free(n); 3410 m_free(n2); 3411 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 3412 return m0; 3413 /* 3414 * Still not there, try the normal collapse 3415 * again before we allocate another cluster. 3416 */ 3417 goto again; 3418 } 3419 prev = &n->m_next; 3420 } 3421 /* 3422 * No place where we can collapse to a cluster; punt. 3423 * This can occur if, for example, you request 2 frags 3424 * but the packet requires that both be clusters (we 3425 * never reallocate the first mbuf to avoid moving the 3426 * packet header). 3427 */ 3428 bad: 3429 return NULL; 3430 } 3431 3432 /* 3433 * Return h/w rate index for an IEEE rate (w/o basic rate bit). 
3434 */ 3435 static int 3436 ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate) 3437 { 3438 int i; 3439 3440 for (i = 0; i < rt->rateCount; i++) 3441 if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate) 3442 return i; 3443 return 0; /* NB: lowest rate */ 3444 } 3445 3446 static int 3447 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) 3448 { 3449 struct mbuf *m; 3450 int error; 3451 3452 /* 3453 * Load the DMA map so any coalescing is done. This 3454 * also calculates the number of descriptors we need. 3455 */ 3456 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 3457 bf->bf_segs, &bf->bf_nseg, 3458 BUS_DMA_NOWAIT); 3459 if (error == EFBIG) { 3460 /* XXX packet requires too many descriptors */ 3461 bf->bf_nseg = ATH_TXDESC+1; 3462 } else if (error != 0) { 3463 sc->sc_stats.ast_tx_busdma++; 3464 m_freem(m0); 3465 return error; 3466 } 3467 /* 3468 * Discard null packets and check for packets that 3469 * require too many TX descriptors. We try to convert 3470 * the latter to a cluster. 
3471 */ 3472 if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */ 3473 sc->sc_stats.ast_tx_linear++; 3474 m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC); 3475 if (m == NULL) { 3476 m_freem(m0); 3477 sc->sc_stats.ast_tx_nombuf++; 3478 return ENOMEM; 3479 } 3480 m0 = m; 3481 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 3482 bf->bf_segs, &bf->bf_nseg, 3483 BUS_DMA_NOWAIT); 3484 if (error != 0) { 3485 sc->sc_stats.ast_tx_busdma++; 3486 m_freem(m0); 3487 return error; 3488 } 3489 KASSERT(bf->bf_nseg <= ATH_TXDESC, 3490 ("too many segments after defrag; nseg %u", bf->bf_nseg)); 3491 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 3492 sc->sc_stats.ast_tx_nodata++; 3493 m_freem(m0); 3494 return EIO; 3495 } 3496 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", 3497 __func__, m0, m0->m_pkthdr.len); 3498 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3499 bf->bf_m = m0; 3500 3501 return 0; 3502 } 3503 3504 static void 3505 ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) 3506 { 3507 struct ath_hal *ah = sc->sc_ah; 3508 struct ath_desc *ds, *ds0; 3509 int i; 3510 3511 /* 3512 * Fillin the remainder of the descriptor info. 3513 */ 3514 ds0 = ds = bf->bf_desc; 3515 for (i = 0; i < bf->bf_nseg; i++, ds++) { 3516 ds->ds_data = bf->bf_segs[i].ds_addr; 3517 if (i == bf->bf_nseg - 1) 3518 ds->ds_link = 0; 3519 else 3520 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 3521 ath_hal_filltxdesc(ah, ds 3522 , bf->bf_segs[i].ds_len /* segment length */ 3523 , i == 0 /* first segment */ 3524 , i == bf->bf_nseg - 1 /* last segment */ 3525 , ds0 /* first descriptor */ 3526 ); 3527 DPRINTF(sc, ATH_DEBUG_XMIT, 3528 "%s: %d: %08x %08x %08x %08x %08x %08x\n", 3529 __func__, i, ds->ds_link, ds->ds_data, 3530 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); 3531 } 3532 /* 3533 * Insert the frame on the outbound list and pass it on 3534 * to the hardware. 
Multicast frames buffered for power 3535 * save stations and transmit from the CAB queue are stored 3536 * on a s/w only queue and loaded on to the CAB queue in 3537 * the SWBA handler since frames only go out on DTIM and 3538 * to avoid possible races. 3539 */ 3540 ATH_TXQ_LOCK(txq); 3541 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 3542 if (txq != &sc->sc_mcastq) { 3543 if (txq->axq_link == NULL) { 3544 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 3545 DPRINTF(sc, ATH_DEBUG_XMIT, 3546 "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, 3547 txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc, 3548 txq->axq_depth); 3549 } else { 3550 *txq->axq_link = bf->bf_daddr; 3551 DPRINTF(sc, ATH_DEBUG_XMIT, 3552 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 3553 txq->axq_qnum, txq->axq_link, 3554 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); 3555 } 3556 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 3557 ath_hal_txstart(ah, txq->axq_qnum); 3558 } else { 3559 if (txq->axq_link != NULL) 3560 *txq->axq_link = bf->bf_daddr; 3561 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; 3562 } 3563 ATH_TXQ_UNLOCK(txq); 3564 } 3565 3566 static int 3567 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, 3568 struct mbuf *m0) 3569 { 3570 struct ieee80211com *ic = &sc->sc_ic; 3571 struct ath_hal *ah = sc->sc_ah; 3572 struct ifnet *ifp = sc->sc_ifp; 3573 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 3574 int error, iswep, ismcast, ismrr; 3575 int keyix, hdrlen, pktlen, try0; 3576 u_int8_t rix, txrate, ctsrate; 3577 u_int8_t cix = 0xff; /* NB: silence compiler */ 3578 struct ath_desc *ds; 3579 struct ath_txq *txq; 3580 struct ieee80211_frame *wh; 3581 u_int subtype, flags, ctsduration; 3582 HAL_PKT_TYPE atype; 3583 const HAL_RATE_TABLE *rt; 3584 HAL_BOOL shortPreamble; 3585 struct ath_node *an; 3586 u_int pri; 3587 3588 wh = mtod(m0, struct ieee80211_frame *); 3589 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 3590 ismcast = 
IEEE80211_IS_MULTICAST(wh->i_addr1); 3591 hdrlen = ieee80211_anyhdrsize(wh); 3592 /* 3593 * Packet length must not include any 3594 * pad bytes; deduct them here. 3595 */ 3596 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 3597 3598 if (iswep) { 3599 const struct ieee80211_cipher *cip; 3600 struct ieee80211_key *k; 3601 3602 /* 3603 * Construct the 802.11 header+trailer for an encrypted 3604 * frame. The only reason this can fail is because of an 3605 * unknown or unsupported cipher/key type. 3606 */ 3607 k = ieee80211_crypto_encap(ic, ni, m0); 3608 if (k == NULL) { 3609 /* 3610 * This can happen when the key is yanked after the 3611 * frame was queued. Just discard the frame; the 3612 * 802.11 layer counts failures and provides 3613 * debugging/diagnostics. 3614 */ 3615 m_freem(m0); 3616 return EIO; 3617 } 3618 /* 3619 * Adjust the packet + header lengths for the crypto 3620 * additions and calculate the h/w key index. When 3621 * a s/w mic is done the frame will have had any mic 3622 * added to it prior to entry so m0->m_pkthdr.len above will 3623 * account for it. Otherwise we need to add it to the 3624 * packet length. 3625 */ 3626 cip = k->wk_cipher; 3627 hdrlen += cip->ic_header; 3628 pktlen += cip->ic_header + cip->ic_trailer; 3629 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) 3630 pktlen += cip->ic_miclen; 3631 keyix = k->wk_keyix; 3632 3633 /* packet header may have moved, reset our local pointer */ 3634 wh = mtod(m0, struct ieee80211_frame *); 3635 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 3636 /* 3637 * Use station key cache slot, if assigned. 3638 */ 3639 keyix = ni->ni_ucastkey.wk_keyix; 3640 if (keyix == IEEE80211_KEYIX_NONE) 3641 keyix = HAL_TXKEYIX_INVALID; 3642 } else 3643 keyix = HAL_TXKEYIX_INVALID; 3644 3645 pktlen += IEEE80211_CRC_LEN; 3646 3647 /* 3648 * Load the DMA map so any coalescing is done. This 3649 * also calculates the number of descriptors we need. 
3650 */ 3651 error = ath_tx_dmasetup(sc, bf, m0); 3652 if (error != 0) 3653 return error; 3654 bf->bf_node = ni; /* NB: held reference */ 3655 m0 = bf->bf_m; /* NB: may have changed */ 3656 wh = mtod(m0, struct ieee80211_frame *); 3657 3658 /* setup descriptors */ 3659 ds = bf->bf_desc; 3660 rt = sc->sc_currates; 3661 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 3662 3663 /* 3664 * NB: the 802.11 layer marks whether or not we should 3665 * use short preamble based on the current mode and 3666 * negotiated parameters. 3667 */ 3668 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 3669 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 3670 shortPreamble = AH_TRUE; 3671 sc->sc_stats.ast_tx_shortpre++; 3672 } else { 3673 shortPreamble = AH_FALSE; 3674 } 3675 3676 an = ATH_NODE(ni); 3677 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 3678 ismrr = 0; /* default no multi-rate retry*/ 3679 /* 3680 * Calculate Atheros packet type from IEEE80211 packet header, 3681 * setup for rate calculations, and select h/w transmit queue. 
3682 */ 3683 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 3684 case IEEE80211_FC0_TYPE_MGT: 3685 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3686 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 3687 atype = HAL_PKT_TYPE_BEACON; 3688 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3689 atype = HAL_PKT_TYPE_PROBE_RESP; 3690 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 3691 atype = HAL_PKT_TYPE_ATIM; 3692 else 3693 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 3694 rix = sc->sc_minrateix; 3695 txrate = rt->info[rix].rateCode; 3696 if (shortPreamble) 3697 txrate |= rt->info[rix].shortPreamble; 3698 try0 = ATH_TXMGTTRY; 3699 /* NB: force all management frames to highest queue */ 3700 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3701 /* NB: force all management frames to highest queue */ 3702 pri = WME_AC_VO; 3703 } else 3704 pri = WME_AC_BE; 3705 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3706 break; 3707 case IEEE80211_FC0_TYPE_CTL: 3708 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 3709 rix = sc->sc_minrateix; 3710 txrate = rt->info[rix].rateCode; 3711 if (shortPreamble) 3712 txrate |= rt->info[rix].shortPreamble; 3713 try0 = ATH_TXMGTTRY; 3714 /* NB: force all ctl frames to highest queue */ 3715 if (ni->ni_flags & IEEE80211_NODE_QOS) { 3716 /* NB: force all ctl frames to highest queue */ 3717 pri = WME_AC_VO; 3718 } else 3719 pri = WME_AC_BE; 3720 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 3721 break; 3722 case IEEE80211_FC0_TYPE_DATA: 3723 atype = HAL_PKT_TYPE_NORMAL; /* default */ 3724 /* 3725 * Data frames: multicast frames go out at a fixed rate, 3726 * otherwise consult the rate control module for the 3727 * rate to use. 3728 */ 3729 if (ismcast) { 3730 /* 3731 * Check mcast rate setting in case it's changed. 
3732 * XXX move out of fastpath 3733 */ 3734 if (ic->ic_mcast_rate != sc->sc_mcastrate) { 3735 sc->sc_mcastrix = 3736 ath_tx_findrix(rt, ic->ic_mcast_rate); 3737 sc->sc_mcastrate = ic->ic_mcast_rate; 3738 } 3739 rix = sc->sc_mcastrix; 3740 txrate = rt->info[rix].rateCode; 3741 if (shortPreamble) 3742 txrate |= rt->info[rix].shortPreamble; 3743 try0 = 1; 3744 } else { 3745 ath_rate_findrate(sc, an, shortPreamble, pktlen, 3746 &rix, &try0, &txrate); 3747 sc->sc_txrate = txrate; /* for LED blinking */ 3748 if (try0 != ATH_TXMAXTRY) 3749 ismrr = 1; 3750 } 3751 pri = M_WME_GETAC(m0); 3752 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 3753 flags |= HAL_TXDESC_NOACK; 3754 break; 3755 default: 3756 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 3757 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 3758 /* XXX statistic */ 3759 m_freem(m0); 3760 return EIO; 3761 } 3762 txq = sc->sc_ac2q[pri]; 3763 3764 /* 3765 * When servicing one or more stations in power-save mode 3766 * (or) if there is some mcast data waiting on the mcast 3767 * queue (to prevent out of order delivery) multicast 3768 * frames must be buffered until after the beacon. 3769 */ 3770 if (ismcast && (ic->ic_ps_sta || sc->sc_mcastq.axq_depth)) { 3771 txq = &sc->sc_mcastq; 3772 /* XXX? more bit in 802.11 frame header */ 3773 } 3774 3775 /* 3776 * Calculate miscellaneous flags. 3777 */ 3778 if (ismcast) { 3779 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 3780 } else if (pktlen > ic->ic_rtsthreshold) { 3781 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 3782 cix = rt->info[rix].controlRate; 3783 sc->sc_stats.ast_tx_rts++; 3784 } 3785 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ 3786 sc->sc_stats.ast_tx_noack++; 3787 3788 /* 3789 * If 802.11g protection is enabled, determine whether 3790 * to use RTS/CTS or just CTS. Note that this is only 3791 * done for OFDM unicast frames. 
3792 */ 3793 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3794 rt->info[rix].phy == IEEE80211_T_OFDM && 3795 (flags & HAL_TXDESC_NOACK) == 0) { 3796 /* XXX fragments must use CCK rates w/ protection */ 3797 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3798 flags |= HAL_TXDESC_RTSENA; 3799 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3800 flags |= HAL_TXDESC_CTSENA; 3801 cix = rt->info[sc->sc_protrix].controlRate; 3802 sc->sc_stats.ast_tx_protect++; 3803 } 3804 3805 /* 3806 * Calculate duration. This logically belongs in the 802.11 3807 * layer but it lacks sufficient information to calculate it. 3808 */ 3809 if ((flags & HAL_TXDESC_NOACK) == 0 && 3810 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 3811 u_int16_t dur; 3812 /* 3813 * XXX not right with fragmentation. 3814 */ 3815 if (shortPreamble) 3816 dur = rt->info[rix].spAckDuration; 3817 else 3818 dur = rt->info[rix].lpAckDuration; 3819 *(u_int16_t *)wh->i_dur = htole16(dur); 3820 } 3821 3822 /* 3823 * Calculate RTS/CTS rate and duration if needed. 3824 */ 3825 ctsduration = 0; 3826 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { 3827 /* 3828 * CTS transmit rate is derived from the transmit rate 3829 * by looking in the h/w rate table. We must also factor 3830 * in whether or not a short preamble is to be used. 3831 */ 3832 /* NB: cix is set above where RTS/CTS is enabled */ 3833 KASSERT(cix != 0xff, ("cix not setup")); 3834 ctsrate = rt->info[cix].rateCode; 3835 /* 3836 * Compute the transmit duration based on the frame 3837 * size and the size of an ACK frame. We call into the 3838 * HAL to do the computation since it depends on the 3839 * characteristics of the actual PHY being used. 3840 * 3841 * NB: CTS is assumed the same size as an ACK so we can 3842 * use the precalculated ACK durations. 
3843 */ 3844 if (shortPreamble) { 3845 ctsrate |= rt->info[cix].shortPreamble; 3846 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3847 ctsduration += rt->info[cix].spAckDuration; 3848 ctsduration += ath_hal_computetxtime(ah, 3849 rt, pktlen, rix, AH_TRUE); 3850 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3851 ctsduration += rt->info[rix].spAckDuration; 3852 } else { 3853 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 3854 ctsduration += rt->info[cix].lpAckDuration; 3855 ctsduration += ath_hal_computetxtime(ah, 3856 rt, pktlen, rix, AH_FALSE); 3857 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 3858 ctsduration += rt->info[rix].lpAckDuration; 3859 } 3860 /* 3861 * Must disable multi-rate retry when using RTS/CTS. 3862 */ 3863 ismrr = 0; 3864 try0 = ATH_TXMGTTRY; /* XXX */ 3865 } else 3866 ctsrate = 0; 3867 3868 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3869 ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len, 3870 sc->sc_hwmap[txrate].ieeerate, -1); 3871 3872 if (bpf_peers_present(ic->ic_rawbpf)) 3873 bpf_mtap(ic->ic_rawbpf, m0); 3874 if (bpf_peers_present(sc->sc_drvbpf)) { 3875 u_int64_t tsf = ath_hal_gettsf64(ah); 3876 3877 sc->sc_tx_th.wt_tsf = htole64(tsf); 3878 sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags; 3879 if (iswep) 3880 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3881 sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate; 3882 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 3883 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 3884 3885 bpf_mtap2(sc->sc_drvbpf, 3886 &sc->sc_tx_th, sc->sc_tx_th_len, m0); 3887 } 3888 3889 /* 3890 * Determine if a tx interrupt should be generated for 3891 * this descriptor. We take a tx interrupt to reap 3892 * descriptors when the h/w hits an EOL condition or 3893 * when the descriptor is specifically marked to generate 3894 * an interrupt. We periodically mark descriptors in this 3895 * way to insure timely replenishing of the supply needed 3896 * for sending frames. 
Defering interrupts reduces system 3897 * load and potentially allows more concurrent work to be 3898 * done but if done to aggressively can cause senders to 3899 * backup. 3900 * 3901 * NB: use >= to deal with sc_txintrperiod changing 3902 * dynamically through sysctl. 3903 */ 3904 if (flags & HAL_TXDESC_INTREQ) { 3905 txq->axq_intrcnt = 0; 3906 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 3907 flags |= HAL_TXDESC_INTREQ; 3908 txq->axq_intrcnt = 0; 3909 } 3910 3911 /* 3912 * Formulate first tx descriptor with tx controls. 3913 */ 3914 /* XXX check return value? */ 3915 ath_hal_setuptxdesc(ah, ds 3916 , pktlen /* packet length */ 3917 , hdrlen /* header length */ 3918 , atype /* Atheros packet type */ 3919 , ni->ni_txpower /* txpower */ 3920 , txrate, try0 /* series 0 rate/tries */ 3921 , keyix /* key cache index */ 3922 , sc->sc_txantenna /* antenna mode */ 3923 , flags /* flags */ 3924 , ctsrate /* rts/cts rate */ 3925 , ctsduration /* rts/cts duration */ 3926 ); 3927 bf->bf_flags = flags; 3928 /* 3929 * Setup the multi-rate retry state only when we're 3930 * going to use it. This assumes ath_hal_setuptxdesc 3931 * initializes the descriptors (so we don't have to) 3932 * when the hardware supports multi-rate retry and 3933 * we don't use it. 3934 */ 3935 if (ismrr) 3936 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); 3937 3938 ath_tx_handoff(sc, txq, bf); 3939 return 0; 3940 } 3941 3942 /* 3943 * Process completed xmit descriptors from the specified queue. 
3944 */ 3945 static int 3946 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 3947 { 3948 struct ath_hal *ah = sc->sc_ah; 3949 struct ieee80211com *ic = &sc->sc_ic; 3950 struct ath_buf *bf; 3951 struct ath_desc *ds, *ds0; 3952 struct ath_tx_status *ts; 3953 struct ieee80211_node *ni; 3954 struct ath_node *an; 3955 int sr, lr, pri, nacked; 3956 HAL_STATUS status; 3957 3958 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3959 __func__, txq->axq_qnum, 3960 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3961 txq->axq_link); 3962 nacked = 0; 3963 for (;;) { 3964 ATH_TXQ_LOCK(txq); 3965 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3966 bf = STAILQ_FIRST(&txq->axq_q); 3967 if (bf == NULL) { 3968 ATH_TXQ_UNLOCK(txq); 3969 break; 3970 } 3971 ds0 = &bf->bf_desc[0]; 3972 ds = &bf->bf_desc[bf->bf_nseg - 1]; 3973 ts = &bf->bf_status.ds_txstat; 3974 status = ath_hal_txprocdesc(ah, ds, ts); 3975 #ifdef ATH_DEBUG 3976 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3977 ath_printtxbuf(bf, txq->axq_qnum, 0, status == HAL_OK); 3978 #endif 3979 if (status == HAL_EINPROGRESS) { 3980 ATH_TXQ_UNLOCK(txq); 3981 break; 3982 } 3983 ATH_TXQ_REMOVE_HEAD(txq, bf_list); 3984 if (txq->axq_depth == 0) 3985 txq->axq_link = NULL; 3986 ATH_TXQ_UNLOCK(txq); 3987 3988 ni = bf->bf_node; 3989 if (ni != NULL) { 3990 an = ATH_NODE(ni); 3991 if (ts->ts_status == 0) { 3992 u_int8_t txant = ts->ts_antenna; 3993 sc->sc_stats.ast_ant_tx[txant]++; 3994 sc->sc_ant_tx[txant]++; 3995 if (ts->ts_rate & HAL_TXSTAT_ALTRATE) 3996 sc->sc_stats.ast_tx_altrate++; 3997 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 3998 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 3999 ts->ts_rssi); 4000 pri = M_WME_GETAC(bf->bf_m); 4001 if (pri >= WME_AC_VO) 4002 ic->ic_wme.wme_hipri_traffic++; 4003 ni->ni_inact = ni->ni_inact_reload; 4004 } else { 4005 if (ts->ts_status & HAL_TXERR_XRETRY) 4006 sc->sc_stats.ast_tx_xretries++; 4007 if (ts->ts_status & HAL_TXERR_FIFO) 4008 
sc->sc_stats.ast_tx_fifoerr++; 4009 if (ts->ts_status & HAL_TXERR_FILT) 4010 sc->sc_stats.ast_tx_filtered++; 4011 } 4012 sr = ts->ts_shortretry; 4013 lr = ts->ts_longretry; 4014 sc->sc_stats.ast_tx_shortretry += sr; 4015 sc->sc_stats.ast_tx_longretry += lr; 4016 /* 4017 * Hand the descriptor to the rate control algorithm. 4018 */ 4019 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4020 (bf->bf_flags & HAL_TXDESC_NOACK) == 0) { 4021 /* 4022 * If frame was ack'd update the last rx time 4023 * used to workaround phantom bmiss interrupts. 4024 */ 4025 if (ts->ts_status == 0) 4026 nacked++; 4027 ath_rate_tx_complete(sc, an, bf); 4028 } 4029 /* 4030 * Reclaim reference to node. 4031 * 4032 * NB: the node may be reclaimed here if, for example 4033 * this is a DEAUTH message that was sent and the 4034 * node was timed out due to inactivity. 4035 */ 4036 ieee80211_free_node(ni); 4037 } 4038 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4039 BUS_DMASYNC_POSTWRITE); 4040 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4041 m_freem(bf->bf_m); 4042 bf->bf_m = NULL; 4043 bf->bf_node = NULL; 4044 4045 ATH_TXBUF_LOCK(sc); 4046 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 4047 ATH_TXBUF_UNLOCK(sc); 4048 } 4049 return nacked; 4050 } 4051 4052 static __inline int 4053 txqactive(struct ath_hal *ah, int qnum) 4054 { 4055 u_int32_t txqs = 1<<qnum; 4056 ath_hal_gettxintrtxqs(ah, &txqs); 4057 return (txqs & (1<<qnum)); 4058 } 4059 4060 /* 4061 * Deferred processing of transmit interrupt; special-cased 4062 * for a single hardware transmit queue (e.g. 5210 and 5211). 
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	/* queue 0 carries data; update last-rx time if anything was ack'd */
	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq);
	/* tx buffers were reclaimed; clear flow control and the tx watchdog */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	/* kick the transmit path in case frames were waiting for buffers */
	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int nacked;

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	if (txqactive(sc->sc_ah, 0))
		nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
	if (txqactive(sc->sc_ah, 1))
		nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
	if (txqactive(sc->sc_ah, 2))
		nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
	if (txqactive(sc->sc_ah, 3))
		nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq);
	/* any ack'd frame counts as proof the link is alive */
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt.
 */
static void
ath_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int i, nacked;

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	ath_start(ifp);
}

/*
 * Reclaim all buffers queued on the specified tx queue without
 * processing their status: unload DMA maps, free mbufs, drop the
 * node references, and return buffers to the free list.
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			ath_printtxbuf(bf, txq->axq_qnum, ix,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(mtod(bf->bf_m, caddr_t),
				bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}

/*
 * Stop DMA on the specified h/w tx queue (best effort; the
 * HAL return value is deliberately ignored).
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
	    __func__, txq->axq_qnum,
	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
	    txq->axq_link);
	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath_draintxq(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	/* XXX return value */
	if (!sc->sc_invalid) {
		/* don't touch the hardware if marked invalid */
		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
		    __func__, sc->sc_bhalq,
		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
		    NULL);
		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
	}
	/* reclaim buffers only after all queues are stopped */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i]);
	ath_tx_draintxq(sc, &sc->sc_mcastq);
#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
		if (bf != NULL && bf->bf_m != NULL) {
			ath_printtxbuf(bf, sc->sc_bhalq, 0,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(mtod(bf->bf_m, caddr_t),
				bf->bf_m->m_len, 0, -1);
		}
	}
#endif /* ATH_DEBUG */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath_stoprecv(struct ath_softc *sc)
{
/* map a descriptor's physical address back to its virtual address */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	DELAY(3000);			/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		printf("%s: rx queue %p, link %p\n", __func__,
			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
		ix = 0;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
 */
static int
ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	sc->sc_rxlink = NULL;
	/* (re)initialize every rx buffer; this also chains the descriptors */
	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
		int error = ath_rxbuf_init(sc, bf);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_RECV,
				"%s: ath_rxbuf_init failed %d\n",
				__func__, error);
			return error;
		}
	}

	bf = STAILQ_FIRST(&sc->sc_rxbuf);
	ath_hal_putrxbuf(ah, bf->bf_daddr);
	ath_hal_rxena(ah);		/* enable recv descriptors */
	ath_mode_init(sc);		/* set filters, etc. */
	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/*
 * Update internal state after a channel change.
 */
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_phymode mode;
	u_int16_t flags;

	/*
	 * Change channels and update the h/w rate map
	 * if we're switching; e.g. 11a to 11b/g.
	 */
	if (IEEE80211_IS_CHAN_HALF(chan))
		mode = IEEE80211_MODE_HALF;
	else if (IEEE80211_IS_CHAN_QUARTER(chan))
		mode = IEEE80211_MODE_QUARTER;
	else
		mode = ieee80211_chan2mode(ic, chan);
	if (mode != sc->sc_curmode)
		ath_setcurmode(sc, mode);
	/*
	 * Update BPF state.  NB: ethereal et. al. don't handle
	 * merged flags well so pick a unique mode for their use.
	 */
	if (IEEE80211_IS_CHAN_A(chan))
		flags = IEEE80211_CHAN_A;
	/* XXX 11g schizophrenia */
	else if (IEEE80211_IS_CHAN_ANYG(chan))
		flags = IEEE80211_CHAN_G;
	else
		flags = IEEE80211_CHAN_B;
	if (IEEE80211_IS_CHAN_T(chan))
		flags |= IEEE80211_CHAN_TURBO;
	if (IEEE80211_IS_CHAN_HALF(chan))
		flags |= IEEE80211_CHAN_HALF;
	if (IEEE80211_IS_CHAN_QUARTER(chan))
		flags |= IEEE80211_CHAN_QUARTER;
	/* record channel info in the radiotap headers for bpf taps */
	sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
		htole16(chan->ic_freq);
	sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
		htole16(flags);
}

/*
 * Poll for a channel clear indication; this is required
 * for channels requiring DFS and not previously visited
 * and/or with a recent radar detection.
4368 */ 4369 static void 4370 ath_dfswait(void *arg) 4371 { 4372 struct ath_softc *sc = arg; 4373 struct ath_hal *ah = sc->sc_ah; 4374 HAL_CHANNEL hchan; 4375 4376 ath_hal_radar_wait(ah, &hchan); 4377 DPRINTF(sc, ATH_DEBUG_DFS, "%s: radar_wait %u/%x/%x\n", 4378 __func__, hchan.channel, hchan.channelFlags, hchan.privFlags); 4379 4380 if (hchan.privFlags & CHANNEL_INTERFERENCE) { 4381 if_printf(sc->sc_ifp, 4382 "channel %u/0x%x/0x%x has interference\n", 4383 hchan.channel, hchan.channelFlags, hchan.privFlags); 4384 return; 4385 } 4386 if ((hchan.privFlags & CHANNEL_DFS) == 0) { 4387 /* XXX should not happen */ 4388 return; 4389 } 4390 if (hchan.privFlags & CHANNEL_DFS_CLEAR) { 4391 sc->sc_curchan.privFlags |= CHANNEL_DFS_CLEAR; 4392 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4393 if_printf(sc->sc_ifp, 4394 "channel %u/0x%x/0x%x marked clear\n", 4395 hchan.channel, hchan.channelFlags, hchan.privFlags); 4396 } else 4397 callout_reset(&sc->sc_dfs_ch, 2 * hz, ath_dfswait, sc); 4398 } 4399 4400 /* 4401 * Set/change channels. If the channel is really being changed, 4402 * it's done by reseting the chip. To accomplish this we must 4403 * first cleanup any pending DMA, then restart stuff after a la 4404 * ath_init. 4405 */ 4406 static int 4407 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 4408 { 4409 struct ath_hal *ah = sc->sc_ah; 4410 struct ieee80211com *ic = &sc->sc_ic; 4411 HAL_CHANNEL hchan; 4412 4413 /* 4414 * Convert to a HAL channel description with 4415 * the flags constrained to reflect the current 4416 * operating mode. 
4417 */ 4418 ath_mapchan(ic, &hchan, chan); 4419 4420 DPRINTF(sc, ATH_DEBUG_RESET, 4421 "%s: %u (%u MHz, hal flags 0x%x) -> %u (%u MHz, hal flags 0x%x)\n", 4422 __func__, 4423 ath_hal_mhz2ieee(ah, sc->sc_curchan.channel, 4424 sc->sc_curchan.channelFlags), 4425 sc->sc_curchan.channel, sc->sc_curchan.channelFlags, 4426 ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags), 4427 hchan.channel, hchan.channelFlags); 4428 if (hchan.channel != sc->sc_curchan.channel || 4429 hchan.channelFlags != sc->sc_curchan.channelFlags) { 4430 HAL_STATUS status; 4431 4432 /* 4433 * To switch channels clear any pending DMA operations; 4434 * wait long enough for the RX fifo to drain, reset the 4435 * hardware at the new frequency, and then re-enable 4436 * the relevant bits of the h/w. 4437 */ 4438 ath_hal_intrset(ah, 0); /* disable interrupts */ 4439 ath_draintxq(sc); /* clear pending tx frames */ 4440 ath_stoprecv(sc); /* turn off frame recv */ 4441 if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) { 4442 if_printf(ic->ic_ifp, "%s: unable to reset " 4443 "channel %u (%u Mhz, flags 0x%x hal flags 0x%x)\n", 4444 __func__, ieee80211_chan2ieee(ic, chan), 4445 chan->ic_freq, chan->ic_flags, hchan.channelFlags); 4446 return EIO; 4447 } 4448 sc->sc_curchan = hchan; 4449 ath_update_txpow(sc); /* update tx power state */ 4450 sc->sc_diversity = ath_hal_getdiversity(ah); 4451 sc->sc_calinterval = 1; 4452 sc->sc_caltries = 0; 4453 4454 /* 4455 * Re-enable rx framework. 4456 */ 4457 if (ath_startrecv(sc) != 0) { 4458 if_printf(ic->ic_ifp, 4459 "%s: unable to restart recv logic\n", __func__); 4460 return EIO; 4461 } 4462 4463 /* 4464 * Change channels and update the h/w rate map 4465 * if we're switching; e.g. 11a to 11b/g. 4466 */ 4467 ic->ic_ibss_chan = chan; 4468 ath_chan_change(sc, chan); 4469 4470 /* 4471 * Handle DFS required waiting period to determine 4472 * if channel is clear of radar traffic. 
4473 */ 4474 if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 4475 #define DFS_AND_NOT_CLEAR(_c) \ 4476 (((_c)->privFlags & (CHANNEL_DFS | CHANNEL_DFS_CLEAR)) == CHANNEL_DFS) 4477 if (DFS_AND_NOT_CLEAR(&sc->sc_curchan)) { 4478 if_printf(sc->sc_ifp, 4479 "wait for DFS clear channel signal\n"); 4480 /* XXX stop sndq */ 4481 sc->sc_ifp->if_drv_flags |= IFF_DRV_OACTIVE; 4482 callout_reset(&sc->sc_dfs_ch, 4483 2 * hz, ath_dfswait, sc); 4484 } else 4485 callout_stop(&sc->sc_dfs_ch); 4486 #undef DFS_NOT_CLEAR 4487 } 4488 4489 /* 4490 * Re-enable interrupts. 4491 */ 4492 ath_hal_intrset(ah, sc->sc_imask); 4493 } 4494 return 0; 4495 } 4496 4497 static void 4498 ath_next_scan(void *arg) 4499 { 4500 struct ath_softc *sc = arg; 4501 struct ieee80211com *ic = &sc->sc_ic; 4502 4503 if (ic->ic_state == IEEE80211_S_SCAN) 4504 ieee80211_next_scan(ic); 4505 } 4506 4507 /* 4508 * Periodically recalibrate the PHY to account 4509 * for temperature/environment changes. 4510 */ 4511 static void 4512 ath_calibrate(void *arg) 4513 { 4514 struct ath_softc *sc = arg; 4515 struct ath_hal *ah = sc->sc_ah; 4516 HAL_BOOL iqCalDone; 4517 4518 sc->sc_stats.ast_per_cal++; 4519 4520 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4521 /* 4522 * Rfgain is out of bounds, reset the chip 4523 * to load new gain values. 4524 */ 4525 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4526 "%s: rfgain change\n", __func__); 4527 sc->sc_stats.ast_per_rfgain++; 4528 ath_reset(sc->sc_ifp); 4529 } 4530 if (!ath_hal_calibrate(ah, &sc->sc_curchan, &iqCalDone)) { 4531 DPRINTF(sc, ATH_DEBUG_ANY, 4532 "%s: calibration of channel %u failed\n", 4533 __func__, sc->sc_curchan.channel); 4534 sc->sc_stats.ast_per_calfail++; 4535 } 4536 /* 4537 * Calibrate noise floor data again in case of change. 4538 */ 4539 ath_hal_process_noisefloor(ah); 4540 /* 4541 * Poll more frequently when the IQ calibration is in 4542 * progress to speedup loading the final settings. 
	 * We temper this aggressive polling with an exponential
	 * back off after 4 tries up to ath_calinterval.
	 */
	if (iqCalDone || sc->sc_calinterval >= ath_calinterval) {
		sc->sc_caltries = 0;
		sc->sc_calinterval = ath_calinterval;
	} else if (sc->sc_caltries > 4) {
		/* double the interval, clamped to ath_calinterval */
		sc->sc_caltries = 0;
		sc->sc_calinterval <<= 1;
		if (sc->sc_calinterval > ath_calinterval)
			sc->sc_calinterval = ath_calinterval;
	}
	KASSERT(0 < sc->sc_calinterval && sc->sc_calinterval <= ath_calinterval,
		("bad calibration interval %u", sc->sc_calinterval));

	DPRINTF(sc, ATH_DEBUG_CALIBRATE,
		"%s: next +%u (%siqCalDone tries %u)\n", __func__,
		sc->sc_calinterval, iqCalDone ? "" : "!", sc->sc_caltries);
	sc->sc_caltries++;
	callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz,
		ath_calibrate, sc);
}

/*
 * 802.11 state transition handler: stop pending timers, set the LED
 * state, program the channel/rx filter/assoc id for the new state,
 * notify the rate control module, then invoke the net80211 parent
 * method (sc_newstate) and restart the calibration/scan timers.
 */
static int
ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni;
	int i, error;
	const u_int8_t *bssid;
	u_int32_t rfilt;
	static const HAL_LED_STATE leds[] = {
		HAL_LED_INIT,	/* IEEE80211_S_INIT */
		HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
		HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
		HAL_LED_ASSOC,	/* IEEE80211_S_ASSOC */
		HAL_LED_RUN,	/* IEEE80211_S_RUN */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[ic->ic_state],
		ieee80211_state_name[nstate]);

	callout_stop(&sc->sc_scan_ch);
	callout_stop(&sc->sc_cal_ch);
	callout_stop(&sc->sc_dfs_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_INIT) {
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		/*
		 * NB: disable interrupts so we don't rx frames.
		 */
		ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
		/*
		 * Notify the rate control algorithm.
		 */
		ath_rate_newstate(sc, nstate);
		goto done;
	}
	ni = ic->ic_bss;
	error = ath_chan_set(sc, ic->ic_curchan);
	if (error != 0)
		goto bad;
	rfilt = ath_calcrxfilter(sc, nstate);
	/* when scanning listen to everything (broadcast bssid) */
	if (nstate == IEEE80211_S_SCAN)
		bssid = ifp->if_broadcastaddr;
	else
		bssid = ni->ni_bssid;
	ath_hal_setrxfilter(ah, rfilt);
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s\n",
		 __func__, rfilt, ether_sprintf(bssid));

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA)
		ath_hal_setassocid(ah, bssid, ni->ni_associd);
	else
		ath_hal_setassocid(ah, bssid, 0);
	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, bssid);
	}

	/*
	 * Notify the rate control algorithm so rates
	 * are setup should ath_beacon_alloc be called.
	 */
	ath_rate_newstate(sc, nstate);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* nothing to do */;
	} else if (nstate == IEEE80211_S_RUN) {
		DPRINTF(sc, ATH_DEBUG_STATE,
			"%s(RUN): ic_flags=0x%08x iv=%d bssid=%s "
			"capinfo=0x%04x chan=%d\n"
			 , __func__
			 , ic->ic_flags
			 , ni->ni_intval
			 , ether_sprintf(ni->ni_bssid)
			 , ni->ni_capinfo
			 , ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (ic->ic_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);
			ath_beacon_free(sc);
			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay.
			 */
			if (ic->ic_opmode == IEEE80211_M_IBSS &&
			    ic->ic_bss->ni_tstamp.tsf != 0)
				sc->sc_syncbeacon = 1;
			else
				ath_beacon_config(sc);
			break;
		case IEEE80211_M_STA:
			/*
			 * Allocate a key cache slot to the station.
			 */
			if ((ic->ic_flags & IEEE80211_F_PRIVACY) == 0 &&
			    sc->sc_hasclrkey &&
			    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
				ath_setup_stationkey(ni);
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 */
			sc->sc_syncbeacon = 1;
			break;
		default:
			break;
		}

		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
	} else {
		/* not RUN: mask beacon-related interrupts */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
	}
done:
	/*
	 * Invoke the parent method to complete the work.
	 */
	error = sc->sc_newstate(ic, nstate, arg);
	/*
	 * Finally, start any timers.
	 */
	if (nstate == IEEE80211_S_RUN) {
		/* start periodic recalibration timer */
		callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz,
			ath_calibrate, sc);
	} else if (nstate == IEEE80211_S_SCAN) {
		/* start ap/neighbor scan timer */
		callout_reset(&sc->sc_scan_ch, (ath_dwelltime * hz) / 1000,
			ath_next_scan, sc);
	}
bad:
	return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	ieee80211_keyix keyix, rxkeyix;

	if (!ath_key_alloc(ic, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking? */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, &ni->ni_ucastkey, ni->ni_macaddr, ic->ic_bss);
	}
}

/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're called also on a re-associate, the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	ath_rate_newassoc(sc, ATH_NODE(ni), isnew);
	/* first-time association w/o crypto: give the station a clear key */
	if (isnew &&
	    (ic->ic_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey) {
		KASSERT(ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE,
		    ("new assoc with a unicast key already setup (keyix %u)",
		      ni->ni_ucastkey.wk_keyix));
		ath_setup_stationkey(ni);
	}
}

/*
 * Collect the channel list from the HAL and convert it into the
 * net80211 channel table (ic_channels), indexed by ieee channel
 * number.  Returns 0 or an errno.
 * NB(review): the "rd" parameter appears unused here — confirm.
 */
static int
ath_getchannels(struct ath_softc *sc,
	HAL_REG_DOMAIN rd, HAL_CTRY_CODE cc, HAL_BOOL outdoor, HAL_BOOL xchanmode)
{
#define	COMPAT \
	(CHANNEL_ALL_NOTURBO|CHANNEL_PASSIVE|CHANNEL_HALF|CHANNEL_QUARTER)
#define IS_CHAN_PUBLIC_SAFETY(_c) \
	(((_c)->channelFlags & CHANNEL_5GHZ) && \
	 ((_c)->channel > 4940 && (_c)->channel < 4990))
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_CHANNEL *chans;
	int i, ix, nchan;
	u_int32_t regdomain;

	chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
			M_TEMP, M_NOWAIT);
	if (chans == NULL) {
		if_printf(ifp, "unable to allocate channel table\n");
		return ENOMEM;
	}
	if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
	    NULL, 0, NULL, cc, HAL_MODE_ALL, outdoor, xchanmode)) {
		ath_hal_getregdomain(ah, &regdomain);
		if_printf(ifp, "unable to collect channel list from hal; "
			"regdomain likely %u country code %u\n", regdomain, cc);
		free(chans, M_TEMP);
		return EINVAL;
	}

	/*
	 * Convert HAL channels to ieee80211 ones and insert
	 * them in the table according to their channel number.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	for (i = 0; i < nchan; i++) {
		HAL_CHANNEL *c = &chans[i];
		u_int16_t flags;

		/*
		 * XXX we're not ready to handle the ieee number mapping
		 * for public safety channels as they overlap with any
		 * 2GHz channels; for now use a non-public safety
		 * numbering that is non-overlapping.
		 */
		ix = ath_hal_mhz2ieee(ah, c->channel, c->channelFlags);
		if (IS_CHAN_PUBLIC_SAFETY(c))
			ix += 37;		/* XXX */
		if (ix > IEEE80211_CHAN_MAX) {
			if_printf(ifp, "bad hal channel %d (%u/%x) ignored\n",
				ix, c->channel, c->channelFlags);
			continue;
		}
		if (ix < 0) {
			/* XXX can't handle stuff <2400 right now */
			if (bootverbose)
				if_printf(ifp, "hal channel %d (%u/%x) "
				    "cannot be handled; ignored\n",
				    ix, c->channel, c->channelFlags);
			continue;
		}
		if (bootverbose)
			if_printf(ifp, "hal channel %u/%x -> %u\n",
			    c->channel, c->channelFlags, ix);
		/*
		 * Calculate net80211 flags; most are compatible
		 * but some need massaging.  Note the static turbo
		 * conversion can be removed once net80211 is updated
		 * to understand static vs. dynamic turbo.
		 */
		flags = c->channelFlags & COMPAT;
		if (c->channelFlags & CHANNEL_STURBO)
			flags |= IEEE80211_CHAN_TURBO;
		if (ath_hal_isgsmsku(ah)) {
			/* remap to true frequencies */
			c->channel = 922 + (2422 - c->channel);
			flags |= IEEE80211_CHAN_GSM;
			ix = ieee80211_mhz2ieee(c->channel, flags);
		}
		if (ic->ic_channels[ix].ic_freq == 0) {
			ic->ic_channels[ix].ic_freq = c->channel;
			ic->ic_channels[ix].ic_flags = flags;
		} else {
			/* channels overlap; e.g. 11g and 11b */
			ic->ic_channels[ix].ic_flags |= flags;
		}
	}
	free(chans, M_TEMP);
	ath_hal_getregdomain(ah, &sc->sc_regdomain);
	ath_hal_getcountrycode(ah, &sc->sc_countrycode);
	sc->sc_xchanmode = xchanmode;
	sc->sc_outdoor = outdoor;
	return 0;
#undef IS_CHAN_PUBLIC_SAFETY
#undef COMPAT
}

/*
 * Callout fired when an LED blink cycle completes.
 */
static void
ath_led_done(void *arg)
{
	struct ath_softc *sc = arg;

	sc->sc_blinking = 0;
}

/*
 * Turn the LED off: flip the pin and then set a timer so no
 * update will happen for the specified duration.
 */
static void
ath_led_off(void *arg)
{
	struct ath_softc *sc = arg;

	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
}

/*
 * Blink the LED according to the specified on/off times.
 */
static void
ath_led_blink(struct ath_softc *sc, int on, int off)
{
	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
	sc->sc_blinking = 1;
	sc->sc_ledoff = off;
	callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
}

/*
 * Start an LED blink for the given event (tx/rx/poll), using
 * blink timings derived from the current tx/rx rate.
 */
static void
ath_led_event(struct ath_softc *sc, int event)
{

	sc->sc_ledevent = ticks;	/* time of last event */
	if (sc->sc_blinking)		/* don't interrupt active blink */
		return;
	switch (event) {
	case ATH_LED_POLL:
		ath_led_blink(sc, sc->sc_hwmap[0].ledon,
			sc->sc_hwmap[0].ledoff);
		break;
	case ATH_LED_TX:
		ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon,
			sc->sc_hwmap[sc->sc_txrate].ledoff);
		break;
	case ATH_LED_RX:
		ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon,
			sc->sc_hwmap[sc->sc_rxrate].ledoff);
		break;
	}
}

/*
 * Push the net80211 tx power limit to the hardware and record
 * the (possibly clamped) value for status reporting.
 */
static void
ath_update_txpow(struct ath_softc *sc)
{
	struct ieee80211com *ic =
	    &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t txpow;

	if (sc->sc_curtxpow != ic->ic_txpowlimit) {
		ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		/* read back in case value is clamped */
		ath_hal_gettxpowlimit(ah, &txpow);
		ic->ic_txpowlimit = sc->sc_curtxpow = txpow;
	}
	/*
	 * Fetch max tx power level for status requests.
	 */
	ath_hal_getmaxtxpow(sc->sc_ah, &txpow);
	ic->ic_bss->ni_txpower = txpow;
}

/*
 * Fetch the HAL rate table for the given phy mode and cache it
 * in sc_rates[mode].  Returns non-zero on success.
 */
static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt;

	switch (mode) {
	case IEEE80211_MODE_11A:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_HALF:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
		break;
	case IEEE80211_MODE_QUARTER:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
		break;
	case IEEE80211_MODE_11B:
		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		/* XXX until static/dynamic turbo is fixed */
		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_TURBO_G:
		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
			__func__, mode);
		return 0;
	}
	sc->sc_rates[mode] = rt;
	return (rt != NULL);
}

/*
 * Switch the current phy mode: rebuild the rate-code index map,
 * the per-hw-rate radiotap/LED table, and the protection and
 * multicast rate indices.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	/* map 802.11 rate (in 0.5Mb/s units) -> h/w rate index */
	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < 32; i++) {
		u_int8_t ix = rt->rateCodeToIndex[i];
		if (ix == 0xff) {
			/* rate code unused in this mode; slowest blink */
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		/* NB: receive frames include FCS */
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags |
			IEEE80211_RADIOTAP_F_FCS;
		/* setup blink rate table to avoid per-packet lookup */
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
5062 */ 5063 if (mode == IEEE80211_MODE_11G) 5064 sc->sc_protrix = ath_tx_findrix(rt, 2*2); 5065 else 5066 sc->sc_protrix = ath_tx_findrix(rt, 2*1); 5067 /* rate index used to send management frames */ 5068 sc->sc_minrateix = 0; 5069 /* 5070 * Setup multicast rate state. 5071 */ 5072 /* XXX layering violation */ 5073 sc->sc_mcastrix = ath_tx_findrix(rt, sc->sc_ic.ic_mcast_rate); 5074 sc->sc_mcastrate = sc->sc_ic.ic_mcast_rate; 5075 /* NB: caller is responsible for reseting rate control state */ 5076 #undef N 5077 } 5078 5079 #ifdef ATH_DEBUG 5080 static void 5081 ath_printrxbuf(const struct ath_buf *bf, u_int ix, int done) 5082 { 5083 const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; 5084 const struct ath_desc *ds; 5085 int i; 5086 5087 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 5088 printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n" 5089 " %08x %08x %08x %08x\n", 5090 ix, ds, (const struct ath_desc *)bf->bf_daddr + i, 5091 ds->ds_link, ds->ds_data, 5092 !done ? "" : (rs->rs_status == 0) ? " *" : " !", 5093 ds->ds_ctl0, ds->ds_ctl1, 5094 ds->ds_hw[0], ds->ds_hw[1]); 5095 } 5096 } 5097 5098 static void 5099 ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done) 5100 { 5101 const struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 5102 const struct ath_desc *ds; 5103 int i; 5104 5105 printf("Q%u[%3u]", qnum, ix); 5106 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { 5107 printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n" 5108 " %08x %08x %08x %08x %08x %08x\n", 5109 ds, (const struct ath_desc *)bf->bf_daddr + i, 5110 ds->ds_link, ds->ds_data, bf->bf_flags, 5111 !done ? "" : (ts->ts_status == 0) ? 
" *" : " !", 5112 ds->ds_ctl0, ds->ds_ctl1, 5113 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]); 5114 } 5115 } 5116 #endif /* ATH_DEBUG */ 5117 5118 static void 5119 ath_watchdog(struct ifnet *ifp) 5120 { 5121 struct ath_softc *sc = ifp->if_softc; 5122 struct ieee80211com *ic = &sc->sc_ic; 5123 5124 ifp->if_timer = 0; 5125 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 5126 return; 5127 if (sc->sc_tx_timer) { 5128 if (--sc->sc_tx_timer == 0) { 5129 if_printf(ifp, "device timeout\n"); 5130 ath_reset(ifp); 5131 ifp->if_oerrors++; 5132 sc->sc_stats.ast_watchdog++; 5133 } else 5134 ifp->if_timer = 1; 5135 } 5136 ieee80211_watchdog(ic); 5137 } 5138 5139 #ifdef ATH_DIAGAPI 5140 /* 5141 * Diagnostic interface to the HAL. This is used by various 5142 * tools to do things like retrieve register contents for 5143 * debugging. The mechanism is intentionally opaque so that 5144 * it can change frequently w/o concern for compatiblity. 5145 */ 5146 static int 5147 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 5148 { 5149 struct ath_hal *ah = sc->sc_ah; 5150 u_int id = ad->ad_id & ATH_DIAG_ID; 5151 void *indata = NULL; 5152 void *outdata = NULL; 5153 u_int32_t insize = ad->ad_in_size; 5154 u_int32_t outsize = ad->ad_out_size; 5155 int error = 0; 5156 5157 if (ad->ad_id & ATH_DIAG_IN) { 5158 /* 5159 * Copy in data. 5160 */ 5161 indata = malloc(insize, M_TEMP, M_NOWAIT); 5162 if (indata == NULL) { 5163 error = ENOMEM; 5164 goto bad; 5165 } 5166 error = copyin(ad->ad_in_data, indata, insize); 5167 if (error) 5168 goto bad; 5169 } 5170 if (ad->ad_id & ATH_DIAG_DYN) { 5171 /* 5172 * Allocate a buffer for the results (otherwise the HAL 5173 * returns a pointer to a buffer where we can read the 5174 * results). Note that we depend on the HAL leaving this 5175 * pointer for us to use below in reclaiming the buffer; 5176 * may want to be more defensive. 
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		/* Copy out at most what the caller asked for. */
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* goto-based cleanup: free whichever buffers were allocated above */
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */

/*
 * Handle ioctl requests: interface flag changes, multicast filter
 * updates, driver statistics, diagnostics, and net80211 pass-through.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	ATH_LOCK(sc);
	switch (cmd) {
	case SIOCSIFFLAGS:
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid && ic->ic_bss != NULL)
				ath_init(sc);	/* XXX lose error */
		} else
			ath_stop_locked(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * The upper layer has already installed/removed
		 * the multicast address(es), just recalculate the
		 * multicast filter for the card.
		 */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ath_mode_init(sc);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic);
		sc->sc_stats.ast_rx_noise =
			ath_hal_getchannoise(sc->sc_ah, &sc->sc_curchan);
		sc->sc_stats.ast_tx_rate = sc->sc_hwmap[sc->sc_txrate].ieeerate;
		ATH_UNLOCK(sc);
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
			ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		/* NB: unlocked around the diag call; it may sleep/copyout */
		ATH_UNLOCK(sc);
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		ATH_LOCK(sc);
		break;
#endif
	default:
		error = ieee80211_ioctl(ic, cmd, data);
		if (error == ENETRESET) {
			/* net80211 state changed; re-init if up and running */
			if (IS_RUNNING(ifp) &&
			    ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
				ath_init(sc);	/* XXX lose error */
			error = 0;
		}
		if (error == ERESTART)
			error = IS_RUNNING(ifp) ? ath_reset(ifp) : 0;
		break;
	}
	ATH_UNLOCK(sc);
	return error;
#undef IS_RUNNING
}

/* Sysctl handler: get/set the 802.11 slot time (us) via the HAL. */
static int
ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int slottime = ath_hal_getslottime(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &slottime, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
}

/* Sysctl handler: get/set the 802.11 ACK timeout (us) via the HAL. */
static int
ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &acktimeout, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
}

/* Sysctl handler: get/set the 802.11 CTS timeout (us) via the HAL. */
static int
ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ?
	    EINVAL : 0;
}

/* Sysctl handler: enable/disable software (GPIO-driven) LED support. */
static int
ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	int softled = sc->sc_softled;
	int error;

	error = sysctl_handle_int(oidp, &softled, 0, req);
	if (error || !req->newptr)
		return error;
	softled = (softled != 0);	/* canonicalize to 0/1 */
	if (softled != sc->sc_softled) {
		if (softled) {
			/* NB: handle any sc_ledpin change */
			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
				!sc->sc_ledon);
		}
		sc->sc_softled = softled;
	}
	return 0;
}

/* Sysctl handler: get/set the tx antenna switch setting. */
static int
ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &txantenna, 0, req);
	if (!error && req->newptr) {
		/* XXX assumes 2 antenna ports */
		if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B)
			return EINVAL;
		ath_hal_setantennaswitch(sc->sc_ah, txantenna);
		/*
		 * NB: with the switch locked this isn't meaningful,
		 *     but set it anyway so things like radiotap get
		 *     consistent info in their data.
		 */
		sc->sc_txantenna = txantenna;
	}
	return error;
}

/* Sysctl handler: get/set the default receive antenna. */
static int
ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int defantenna = ath_hal_getdefantenna(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &defantenna, 0, req);
	if (!error && req->newptr)
		ath_hal_setdefantenna(sc->sc_ah, defantenna);
	return error;
}

/* Sysctl handler: enable/disable antenna diversity. */
static int
ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int diversity = ath_hal_getdiversity(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &diversity, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setdiversity(sc->sc_ah, diversity))
		return EINVAL;
	sc->sc_diversity = diversity;
	return 0;
}

/* Sysctl handler: get/set the h/w diagnostic control bits. */
static int
ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t diag;
	int error;

	if (!ath_hal_getdiag(sc->sc_ah, &diag))
		return EINVAL;
	error = sysctl_handle_int(oidp, &diag, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
}

/*
 * Sysctl handler: get/set the tx power scaling factor; changing it
 * requires a chip reset when the interface is running.
 */
static int
ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t scale;
	int error;

	ath_hal_gettpscale(sc->sc_ah, &scale);
	error = sysctl_handle_int(oidp, &scale, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) ?
	    ath_reset(ifp) : 0;
}

/* Sysctl handler: enable/disable per-packet transmit power control. */
static int
ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int tpc = ath_hal_gettpc(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &tpc, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
}

/*
 * Sysctl handler: enable/disable the RF kill switch; a change
 * requires a chip reset when the interface is running.
 */
static int
ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int rfkill = ath_hal_getrfkill(ah);
	int error;

	error = sysctl_handle_int(oidp, &rfkill, 0, req);
	if (error || !req->newptr)
		return error;
	if (rfkill == ath_hal_getrfkill(ah))	/* unchanged */
		return 0;
	if (!ath_hal_setrfkill(ah, rfkill))
		return EINVAL;
	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
}

/* Sysctl handler: get/set the RF silent (GPIO pin/polarity) config. */
static int
ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int rfsilent;
	int error;

	ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
	error = sysctl_handle_int(oidp, &rfsilent, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
		return EINVAL;
	/* unpack the pin number and polarity from the packed word */
	sc->sc_rfsilentpin = rfsilent & 0x1c;
	sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
	return 0;
}

/*
 * Sysctl handler: change the country code; rebuilds the channel
 * list and re-inits the media state to match.
 */
static int
ath_sysctl_countrycode(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t cc = sc->sc_countrycode;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	error = sysctl_handle_int(oidp, &cc, 0, req);
	if (error || !req->newptr)
		return error;
	error = ath_getchannels(sc, sc->sc_regdomain, cc,
			sc->sc_outdoor, sc->sc_xchanmode);
	if (error != 0)
		return error;
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);
	/* setcurmode? */
	return 0;
}

/*
 * Sysctl handler: change the regulatory domain; rebuilds the
 * channel list and re-inits the media state to match.
 */
static int
ath_sysctl_regdomain(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t rd = sc->sc_regdomain;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	error = sysctl_handle_int(oidp, &rd, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setregdomain(sc->sc_ah, rd))
		return EINVAL;
	error = ath_getchannels(sc, rd, sc->sc_countrycode,
			sc->sc_outdoor, sc->sc_xchanmode);
	if (error != 0)
		return error;
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);
	/* setcurmode? */
	return 0;
}

/* Sysctl handler: get/set the tx power used for ACK frames. */
static int
ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t tpack;
	int error;

	ath_hal_gettpack(sc->sc_ah, &tpack);
	error = sysctl_handle_int(oidp, &tpack, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
}

/* Sysctl handler: get/set the tx power used for CTS frames. */
static int
ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t tpcts;
	int error;

	ath_hal_gettpcts(sc->sc_ah, &tpcts);
	error = sysctl_handle_int(oidp, &tpcts, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpcts(sc->sc_ah, tpcts) ?
	    EINVAL : 0;
}

/*
 * Attach the dynamic sysctl tree for the device: timing knobs, LED
 * control, antenna configuration, tx power, RF kill, and debug aids.
 * Capability-dependent nodes are added only when the HAL reports
 * support (diversity, TPC, RF silent).
 */
static void
ath_sysctlattach(struct ath_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
	struct ath_hal *ah = sc->sc_ah;

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"countrycode", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_countrycode, "I", "country code");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"regdomain", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_regdomain, "I", "EEPROM regdomain code");
#ifdef ATH_DEBUG
	/* seed the per-device debug mask from the global default */
	sc->sc_debug = ath_debug;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->sc_debug, 0,
		"control debugging printfs");
#endif
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_slottime, "I", "802.11 slot time (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_softled, "I", "enable/disable software LED support");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0,
		"GPIO pin connected to LED");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
		"setting to turn LED on");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
		"idle time for inactivity LED (ticks)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txantenna",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_txantenna, "I", "antenna switch");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_rxantenna, "I", "default/rx antenna");
	if (ath_hal_hasdiversity(ah))
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_diversity, "I", "antenna diversity");
	sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
		"tx descriptor batching");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_diag, "I", "h/w diagnostic control");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_tpscale, "I", "tx power scaling");
	if (ath_hal_hastpc(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpack, "I", "tx power for ack frames");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpcts, "I", "tx power for cts frames");
	}
	if (ath_hal_hasrfsilent(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfsilent, "I", "h/w RF silent config");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
	}
	sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
		"mask of error frames to pass when monitoring");
}

/*
 * Attach to bpf for raw 802.11 + radiotap capture and initialize
 * the constant parts of the radiotap headers.
 */
static void
ath_bpfattach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	bpfattach2(ifp, DLT_IEEE802_11_RADIO,
		sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th),
		&sc->sc_drvbpf);
	/*
	 * Initialize constant fields.
	 * XXX make header lengths a multiple of 32-bits so subsequent
	 *     headers are properly aligned; this is a kludge to keep
	 *     certain applications happy.
	 *
	 * NB: the channel is setup each time we transition to the
	 *     RUN state to avoid filling it in for each frame.
	 */
	sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
	sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
	sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);

	sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
	sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
	sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
}

/*
 * Start transmission of a raw (caller-formed) frame using explicit
 * per-frame parameters supplied through the bpf injection interface:
 * rate series, RTS/CTS protection, power, and antenna come from
 * "params" rather than from rate control.
 */
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
	struct ath_buf *bf, struct mbuf *m0,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	int error, ismcast, ismrr;
	int hdrlen, pktlen, try0, txantenna;
	u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
	struct ath_txq *txq;
	struct ieee80211_frame *wh;
	u_int flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	struct ath_desc *ds;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	/* XXX honor IEEE80211_BPF_DATAPAD */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	m0 = bf->bf_m;			/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);
	bf->bf_node = ni;		/* NB: held reference */

	flags = HAL_TXDESC_CLRDMASK;	/* XXX needed for crypto errs */
	flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
	if (params->ibp_flags & IEEE80211_BPF_RTS)
		flags |= HAL_TXDESC_RTSENA;
	else if (params->ibp_flags & IEEE80211_BPF_CTS)
		flags |= HAL_TXDESC_CTSENA;
	/* XXX leave ismcast to injector? */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
		flags |= HAL_TXDESC_NOACK;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = ath_tx_findrix(rt, params->ibp_rate0);
	txrate = rt->info[rix].rateCode;
	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		txrate |= rt->info[rix].shortPreamble;
	sc->sc_txrate = txrate;
	try0 = params->ibp_try0;
	ismrr = (params->ibp_try1 != 0);	/* multi-rate retry in use? */
	txantenna = params->ibp_pri >> 2;
	if (txantenna == 0)			/* XXX? */
		txantenna = sc->sc_txantenna;
	ctsduration = 0;
	if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
		/* compute protection frame rate and total burst duration */
		cix = ath_tx_findrix(rt, params->ibp_ctsrate);
		ctsrate = rt->info[cix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		ismrr = 0;			/* XXX */
	} else
		ctsrate = 0;
	pri = params->ibp_pri & 3;
	/*
	 * NB: we mark all packets as type PSPOLL so the h/w won't
	 * set the sequence number, duration, etc.
	 */
	atype = HAL_PKT_TYPE_PSPOLL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len,
			sc->sc_hwmap[txrate].ieeerate, -1);

	/* feed bpf taps: raw 802.11 and radiotap-annotated */
	if (bpf_peers_present(ic->ic_rawbpf))
		bpf_mtap(ic->ic_rawbpf, m0);
	if (bpf_peers_present(sc->sc_drvbpf)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		bpf_mtap2(sc->sc_drvbpf,
			&sc->sc_tx_th, sc->sc_tx_th_len, m0);
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ds = bf->bf_desc;
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, params->ibp_power	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID	/* key cache index */
		, txantenna		/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_flags = flags;

	if (ismrr) {
		/* configure retry series 1-3 from the caller's parameters */
		rix = ath_tx_findrix(rt, params->ibp_rate1);
		rate1 = rt->info[rix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
			rate1 |= rt->info[rix].shortPreamble;
		if (params->ibp_try2) {
			rix = ath_tx_findrix(rt, params->ibp_rate2);
			rate2 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate2 |= rt->info[rix].shortPreamble;
		} else
			rate2 = 0;
		if (params->ibp_try3) {
			rix = ath_tx_findrix(rt, params->ibp_rate3);
			rate3 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate3 |= rt->info[rix].shortPreamble;
		} else
			rate3 = 0;
		ath_hal_setupxtxdesc(ah, ds
			, rate1, params->ibp_try1	/* series 1 */
			, rate2, params->ibp_try2	/* series 2 */
			, rate3, params->ibp_try3	/* series 3 */
		);
	}

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast
	 * frames must be buffered until after the beacon.
	 */
	txq = sc->sc_ac2q[pri];
	if (ismcast && (ic->ic_ps_sta || sc->sc_mcastq.axq_depth))
		txq = &sc->sc_mcastq;
	ath_tx_handoff(sc, txq, bf);
	return 0;
}

/*
 * Transmit a frame injected via bpf.  Takes a tx buffer from the
 * free list and dispatches through the normal or raw start path
 * depending on whether explicit parameters were supplied.
 */
static int
ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_buf *bf;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 */
	ATH_TXBUF_LOCK(sc);
	bf = STAILQ_FIRST(&sc->sc_txbuf);
	if (bf != NULL)
		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
			__func__);
		sc->sc_stats.ast_tx_qstop++;
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		m_freem(m);
		return ENOBUFS;
	}

	ifp->if_opackets++;
	sc->sc_stats.ast_tx_raw++;

	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		if (ath_tx_start(sc, ni, bf, m))
			goto bad;
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		if (ath_tx_raw_start(sc, ni, bf, m, params))
			goto bad;
	}
	/* arm the per-device tx hang timer and the ifnet watchdog */
	sc->sc_tx_timer = 5;
	ifp->if_timer = 1;

	return 0;
bad:
	/* NB: tx start paths consumed the mbuf; only the buf is returned */
	ifp->if_oerrors++;
	ATH_TXBUF_LOCK(sc);
	STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
	ieee80211_free_node(ni);
	return EIO;		/* XXX */
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
#define	HAL_MODE_DUALBAND	(HAL_MODE_11A|HAL_MODE_11B)
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int modes, cc;

	if_printf(ifp, "mac %d.%d phy %d.%d",
		ah->ah_macVersion, ah->ah_macRev,
		ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	/*
	 * Print radio revision(s).  We check the wireless modes
	 * to avoid falsely printing revs for inoperable parts.
	 * Dual-band radio revs are returned in the 5Ghz rev number.
	 */
	ath_hal_getcountrycode(ah, &cc);
	modes = ath_hal_getwirelessmodes(ah, cc);
	if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) {
		if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev)
			printf(" 5ghz radio %d.%d 2ghz radio %d.%d",
				ah->ah_analog5GhzRev >> 4,
				ah->ah_analog5GhzRev & 0xf,
				ah->ah_analog2GhzRev >> 4,
				ah->ah_analog2GhzRev & 0xf);
		else
			printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
				ah->ah_analog5GhzRev & 0xf);
	} else
		printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
			ah->ah_analog5GhzRev & 0xf);
	printf("\n");
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
			sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
	/* note non-default buffer tunables */
	if (ath_rxbuf != ATH_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
#undef HAL_MODE_DUALBAND
}