1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting 5 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 16 * redistribution must be conditioned upon including a substantially 17 * similar Disclaimer requirement for further binary redistribution. 18 * 19 * NO WARRANTY 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGES. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * Driver for the Marvell 88W8363 Wireless LAN controller. 
38 */ 39 40 #include "opt_inet.h" 41 #include "opt_mwl.h" 42 #include "opt_wlan.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/sysctl.h> 47 #include <sys/mbuf.h> 48 #include <sys/malloc.h> 49 #include <sys/lock.h> 50 #include <sys/mutex.h> 51 #include <sys/kernel.h> 52 #include <sys/socket.h> 53 #include <sys/sockio.h> 54 #include <sys/errno.h> 55 #include <sys/callout.h> 56 #include <sys/bus.h> 57 #include <sys/endian.h> 58 #include <sys/kthread.h> 59 #include <sys/taskqueue.h> 60 61 #include <machine/bus.h> 62 63 #include <net/if.h> 64 #include <net/if_var.h> 65 #include <net/if_dl.h> 66 #include <net/if_media.h> 67 #include <net/if_types.h> 68 #include <net/if_arp.h> 69 #include <net/ethernet.h> 70 #include <net/if_llc.h> 71 72 #include <net/bpf.h> 73 74 #include <net80211/ieee80211_var.h> 75 #include <net80211/ieee80211_input.h> 76 #include <net80211/ieee80211_regdomain.h> 77 78 #ifdef INET 79 #include <netinet/in.h> 80 #include <netinet/if_ether.h> 81 #endif /* INET */ 82 83 #include <dev/mwl/if_mwlvar.h> 84 #include <dev/mwl/mwldiag.h> 85 86 /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */ 87 #define MS(v,x) (((v) & x) >> x##_S) 88 #define SM(v,x) (((v) << x##_S) & x) 89 90 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *, 91 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 92 const uint8_t [IEEE80211_ADDR_LEN], 93 const uint8_t [IEEE80211_ADDR_LEN]); 94 static void mwl_vap_delete(struct ieee80211vap *); 95 static int mwl_setupdma(struct mwl_softc *); 96 static int mwl_hal_reset(struct mwl_softc *sc); 97 static int mwl_init(struct mwl_softc *); 98 static void mwl_parent(struct ieee80211com *); 99 static int mwl_reset(struct ieee80211vap *, u_long); 100 static void mwl_stop(struct mwl_softc *); 101 static void mwl_start(struct mwl_softc *); 102 static int mwl_transmit(struct ieee80211com *, struct mbuf *); 103 static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *, 104 const struct 
ieee80211_bpf_params *); 105 static int mwl_media_change(struct ifnet *); 106 static void mwl_watchdog(void *); 107 static int mwl_ioctl(struct ieee80211com *, u_long, void *); 108 static void mwl_radar_proc(void *, int); 109 static void mwl_chanswitch_proc(void *, int); 110 static void mwl_bawatchdog_proc(void *, int); 111 static int mwl_key_alloc(struct ieee80211vap *, 112 struct ieee80211_key *, 113 ieee80211_keyix *, ieee80211_keyix *); 114 static int mwl_key_delete(struct ieee80211vap *, 115 const struct ieee80211_key *); 116 static int mwl_key_set(struct ieee80211vap *, 117 const struct ieee80211_key *); 118 static int _mwl_key_set(struct ieee80211vap *, 119 const struct ieee80211_key *, 120 const uint8_t mac[IEEE80211_ADDR_LEN]); 121 static int mwl_mode_init(struct mwl_softc *); 122 static void mwl_update_mcast(struct ieee80211com *); 123 static void mwl_update_promisc(struct ieee80211com *); 124 static void mwl_updateslot(struct ieee80211com *); 125 static int mwl_beacon_setup(struct ieee80211vap *); 126 static void mwl_beacon_update(struct ieee80211vap *, int); 127 #ifdef MWL_HOST_PS_SUPPORT 128 static void mwl_update_ps(struct ieee80211vap *, int); 129 static int mwl_set_tim(struct ieee80211_node *, int); 130 #endif 131 static int mwl_dma_setup(struct mwl_softc *); 132 static void mwl_dma_cleanup(struct mwl_softc *); 133 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *, 134 const uint8_t [IEEE80211_ADDR_LEN]); 135 static void mwl_node_cleanup(struct ieee80211_node *); 136 static void mwl_node_drain(struct ieee80211_node *); 137 static void mwl_node_getsignal(const struct ieee80211_node *, 138 int8_t *, int8_t *); 139 static void mwl_node_getmimoinfo(const struct ieee80211_node *, 140 struct ieee80211_mimo_info *); 141 static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *); 142 static void mwl_rx_proc(void *, int); 143 static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int); 144 static int mwl_tx_setup(struct 
mwl_softc *, int, int); 145 static int mwl_wme_update(struct ieee80211com *); 146 static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *); 147 static void mwl_tx_cleanup(struct mwl_softc *); 148 static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *); 149 static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *, 150 struct mwl_txbuf *, struct mbuf *); 151 static void mwl_tx_proc(void *, int); 152 static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *); 153 static void mwl_draintxq(struct mwl_softc *); 154 static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *); 155 static int mwl_recv_action(struct ieee80211_node *, 156 const struct ieee80211_frame *, 157 const uint8_t *, const uint8_t *); 158 static int mwl_addba_request(struct ieee80211_node *, 159 struct ieee80211_tx_ampdu *, int dialogtoken, 160 int baparamset, int batimeout); 161 static int mwl_addba_response(struct ieee80211_node *, 162 struct ieee80211_tx_ampdu *, int status, 163 int baparamset, int batimeout); 164 static void mwl_addba_stop(struct ieee80211_node *, 165 struct ieee80211_tx_ampdu *); 166 static int mwl_startrecv(struct mwl_softc *); 167 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *, 168 struct ieee80211_channel *); 169 static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*); 170 static void mwl_scan_start(struct ieee80211com *); 171 static void mwl_scan_end(struct ieee80211com *); 172 static void mwl_set_channel(struct ieee80211com *); 173 static int mwl_peerstadb(struct ieee80211_node *, 174 int aid, int staid, MWL_HAL_PEERINFO *pi); 175 static int mwl_localstadb(struct ieee80211vap *); 176 static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int); 177 static int allocstaid(struct mwl_softc *sc, int aid); 178 static void delstaid(struct mwl_softc *sc, int staid); 179 static void mwl_newassoc(struct ieee80211_node *, int); 180 static void mwl_agestations(void *); 181 
static	int mwl_setregdomain(struct ieee80211com *,
		struct ieee80211_regdomain *, int,
		struct ieee80211_channel []);
static	void mwl_getradiocaps(struct ieee80211com *, int, int *,
		struct ieee80211_channel []);
static	int mwl_getchannels(struct mwl_softc *);

static	void mwl_sysctlattach(struct mwl_softc *);
static	void mwl_announce(struct mwl_softc *);

/* Parent node for all hw.mwl.* tunables below. */
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");

/*
 * Driver-wide resource sizing knobs.  These are read at attach/DMA-setup
 * time; the RWTUN ones may also be set as loader tunables.
 */
static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
	    0, "rx descriptors allocated");
static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
	    0, "rx buffers allocated");
static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
	    0, "tx buffers allocated");
static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
	    0, "tx buffers to send at once");
static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
	    0, "max rx buffers to process per interrupt");
static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
	    0, "min free rx buffers before restarting traffic");

#ifdef MWL_DEBUG
/* Bitmask of debug categories; OR values from the enum below into this. */
static	int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
	    0, "control debugging printfs");
/* Debug category bits tested by DPRINTF via sc->sc_debug. */
enum {
	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
	MWL_DEBUG_BEACON	= 0x00000020,	/* beacon handling */
	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
	MWL_DEBUG_ANY		= 0xffffffff
};
/* True when wh is a beacon frame (mgt type + beacon subtype). */
#define	IS_BEACON(wh) \
    ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/*
 * Dump received frames when RECV debugging is on; beacons are
 * suppressed unless RECV_ALL is also enabled to cut down noise.
 */
#define	IFF_DUMPPKTS_RECV(sc, wh) \
    ((sc->sc_debug & MWL_DEBUG_RECV) && \
	((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
#define	IFF_DUMPPKTS_XMIT(sc) \
	(sc->sc_debug & MWL_DEBUG_XMIT)

/*
 * NOTE(review): fmt is passed with a bare __VA_ARGS__ (no ## extension),
 * so every DPRINTF call site must supply at least one variadic argument.
 */
#define	DPRINTF(sc, m, fmt, ...) do {				\
	if (sc->sc_debug & (m))					\
		printf(fmt, __VA_ARGS__);			\
} while (0)
#define	KEYPRINTF(sc, hk, mac) do {				\
	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
		mwl_keyprint(sc, __func__, hk, mac);		\
} while (0)
static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
/* Non-debug build: all of the above compile away to nothing. */
#define	IFF_DUMPPKTS_RECV(sc, wh)	0
#define	IFF_DUMPPKTS_XMIT(sc)		0
#define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
#define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
#endif

static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");

/*
 * Each packet has fixed front matter: a 2-byte length
 * of the payload, followed by a 4-address 802.11 header
 * (regardless of the actual header and always w/o any
 * QoS header).  The payload then follows.
 */
struct mwltxrec {
	uint16_t fwlen;				/* payload length, for firmware */
	struct ieee80211_frame_addr4 wh;	/* always 4-address, no QoS */
} __packed;

/*
 * Read/Write shorthands for accesses to BAR 0.  Note
 * that all BAR 1 operations are done in the "hal" and
 * there should be no reference to them here.
 */
#ifdef MWL_DEBUG
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif

static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}

/*
 * Device attach: bring up the HAL, load firmware, size and push the
 * DMA rings to the device, then register with net80211 (capabilities,
 * method overrides, radiotap).  Returns 0 on success or an errno; on
 * failure resources acquired so far are released via the bad* labels
 * in reverse order and sc_invalid is set so the ISR ignores the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/*
	 * NOTE(review): taskqueue_create is called with M_NOWAIT and the
	 * result is not checked before taskqueue_start_threads; a NULL
	 * return here would fault — confirm whether this can fail at
	 * attach time or add a check.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		    ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	/* save default handlers we wrap so ours can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;
	return error;
}

/*
 * Device detach: stop the device, then tear down in the order the
 * comment below requires.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
/*
 * Pick a locally-administered MAC for a cloned vap: find the first
 * free bit in sc_bssidmask and encode it into mac[0] (U/L bit set).
 * Index 0 (the EEPROM address) is reference-counted via sc_nbssid0.
 */
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 32; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		/*
		 * NOTE(review): if all 32 mask bits are in use the loop
		 * exits with i == 32 and the 1<<i below is undefined —
		 * presumably the vap count is bounded elsewhere; verify.
		 */
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	if (i == 0)
		sc->sc_nbssid0++;
}

/* Release a MAC index handed out by assign_address. */
static void
reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;	/* recover the index encoded above */
	if (i != 0 || --sc->sc_nbssid0 == 0)
		sc->sc_bssidmask &= ~(1<<i);
}

/*
 * net80211 vap_create method: allocate a hal vap (AP/STA modes),
 * set up the driver's mwl_vap wrapper, override net80211 methods,
 * and update the vap counts / overall operating mode.  Returns the
 * new vap or NULL on failure (unsupported opmode, no hal vap, or a
 * WDS vap with no AP vap to attach to).
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods; stash the defaults we chain to */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}

/*
 * net80211 vap_delete method: quiesce interrupts while the vap is
 * torn down, release the hal vap/station state, update vap counts
 * and give back the cloned MAC address.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);
}

/* Power-management suspend: just stop the device under the lock. */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}

/*
 * Power-management resume: re-init if any interface was running,
 * then restart all vaps on success.
 */
void
mwl_resume(struct mwl_softc *sc)
{
	int error = EDOOFUS;	/* sentinel: only cleared by mwl_init */

	MWL_LOCK(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		error = mwl_init(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
}

/* Shutdown hook: identical to suspend — stop the device. */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	/* dispatch each cause to its deferred task; empty arms are TODO */
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}

/* Deferred radar-detect handling: count it and notify net80211 DFS. */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}

/* Deferred CSA-complete handling from the DFS channel-switch intr. */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}

/*
 * Tear down one stalled BA stream.  sp->data[0]/[1] carry the node
 * and tx ampdu state the driver stashed when the stream was created.
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}

/*
 * Deferred BA watchdog: ask the firmware which stream(s) stalled.
 * A bitmap of 0xff means "all"; 0xaa is ignored (f/w sentinel —
 * NOTE(review): meaning inferred from use here, confirm against the
 * firmware interface); otherwise the value names a single stream.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/*
		   disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}

/*
 * Convert net80211 channel to a HAL channel.
 */
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
	hc->channel = chan->ic_ieee;

	/*
	 * Clear all flag bits at once.
	 * NOTE(review): this type-puns the channelFlags bitfield struct
	 * through uint32_t — assumes the struct is exactly 4 bytes and
	 * suitably aligned; confirm against the MWL_HAL_CHANNEL layout.
	 */
	*(uint32_t *)&hc->channelFlags = 0;
	if (IEEE80211_IS_CHAN_2GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
	else if (IEEE80211_IS_CHAN_5GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
	if (IEEE80211_IS_CHAN_HT40(chan)) {
		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
		if (IEEE80211_IS_CHAN_HT40U(chan))
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
		else
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
	} else
		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
	/* XXX 10MHz channels */
}

/*
 * Inform firmware of our tx/rx dma setup.  The BAR 0
 * writes below are for compatibility with older firmware.
 * For current firmware we send this information with a
 * cmd block via mwl_hal_sethwdma.
 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* tx rings: one per non-ACK h/w queue */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}

/*
 * Inform firmware of tx rate parameters.
 * Called after a channel change.
 */
static int
mwl_setcurchanrates(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	const struct ieee80211_rateset *rs;
	MWL_HAL_TXRATE rates;

	memset(&rates, 0, sizeof(rates));
	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
	/* rate used to send management frames */
	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
	/* rate used to send multicast frames */
	rates.McastRate = rates.MgtRate;

	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
}

/*
 * Inform firmware of tx rate parameters.  Called whenever
 * user-settable params change and after a channel change.
 */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}

/*
 * Setup a fixed xmit rate cookie for EAPOL frames.
 */
static void
mwl_seteapolformat(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	enum ieee80211_phymode mode;
	uint8_t rate;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	mode = ieee80211_chan2mode(ni->ni_chan);
	/*
	 * Use legacy rates when operating a mixed HT+non-HT bss.
	 * NB: this may violate POLA for sta and wds vap's.
	 */
	if (mode == IEEE80211_MODE_11NA &&
	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
	else if (mode == IEEE80211_MODE_11NG &&
	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
	else
		rate = vap->iv_txparms[mode].mgmtrate;

	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
}

/*
 * Map SKU+country code to region code for radar bin'ing.
 */
static int
mwl_map2regioncode(const struct ieee80211_regdomain *rd)
{
	switch (rd->regdomain) {
	case SKU_FCC:
	case SKU_FCC3:
		return DOMAIN_CODE_FCC;
	case SKU_CA:
		return DOMAIN_CODE_IC;
	case SKU_ETSI:
	case SKU_ETSI2:
	case SKU_ETSI3:
		if (rd->country == CTRY_SPAIN)
			return DOMAIN_CODE_SPAIN;
		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
			return DOMAIN_CODE_FRANCE;
		/* XXX force 1.3.1 radar type */
		return DOMAIN_CODE_ETSI_131;
	case SKU_JAPAN:
		return DOMAIN_CODE_MKK;
	case SKU_ROW:
		return DOMAIN_CODE_DGT;	/* Taiwan */
	case SKU_APAC:
	case SKU_APAC2:
	case SKU_APAC3:
		return DOMAIN_CODE_AUS;	/* Australia */
	}
	/* XXX KOREA? */
	return DOMAIN_CODE_FCC;			/* XXX? */
}

/*
 * Push vap-independent state (antennas, preamble, WMM, channel,
 * rate adaptation, region code, etc.) to the firmware.
 * NB: always reports success (returns 1); the individual hal
 * calls are not error-checked here.
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}

/*
 * (Re)start the hardware: stop any previous activity, push
 * vap-independent state, start the receive path and enable
 * interrupts.  Caller must hold the softc lock.  Returns 0
 * or an errno.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}

/*
 * Shut the driver down: mark it stopped, cancel the watchdog
 * and reclaim any queued tx frames.  Caller must hold the
 * softc lock; safe to call when already stopped.
 */
static void
mwl_stop(struct mwl_softc *sc)
{

	MWL_LOCK_ASSERT(sc);
	if (sc->sc_running) {
		/*
		 * Shutdown the hardware and driver.
		 */
		sc->sc_running = 0;
		callout_stop(&sc->sc_watchdog);
		sc->sc_tx_timer = 0;
		mwl_draintxq(sc);
	}
}

/*
 * Re-push per-vap state (rates, RTS threshold, HT guard interval
 * and protection) to the firmware; when running as an AP/IBSS/MBSS
 * also rebuild the beacon.  Returns 0 or the beacon setup status.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}

/*
 * Reset the hardware w/o losing operational state.
 * Used to reset or reload hardware state for a vap.
 */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	if (hvap != NULL) {		/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct mwl_softc *sc = ic->ic_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);
	}
	return error;
}

/*
 * Allocate a tx buffer for sending a frame.  The
 * packet is assumed to have the WME AC stored so
 * we can use it to select the appropriate h/w queue.
 */
static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;

	/*
	 * Grab a TX buffer and associated resources.
	 */
	MWL_TXQ_LOCK(txq);
	bf = STAILQ_FIRST(&txq->free);
	if (bf != NULL) {
		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
		txq->nfree--;
	}
	MWL_TXQ_UNLOCK(txq);
	if (bf == NULL)
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
	return bf;
}

/*
 * Return a tx buffer to the queue it came from.  Note there
 * are two cases because we must preserve the order of buffers
 * as it reflects the fixed order of descriptors in memory
 * (the firmware pre-fetches descriptors so we cannot reorder).
 */
static void
mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}

static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	MWL_TXQ_LOCK(txq);
	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree++;
	MWL_TXQ_UNLOCK(txq);
}

/*
 * net80211 transmit entry point: enqueue the frame on the
 * software send queue and kick the transmit path.  Returns
 * ENXIO when the interface is down, else the enqueue status.
 */
static int
mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
{
	struct mwl_softc *sc = ic->ic_softc;
	int error;

	MWL_LOCK(sc);
	if (!sc->sc_running) {
		MWL_UNLOCK(sc);
		return (ENXIO);
	}
	error = mbufq_enqueue(&sc->sc_snd, m);
	if (error) {
		MWL_UNLOCK(sc);
		return (error);
	}
	mwl_start(sc);
	MWL_UNLOCK(sc);
	return (0);
}

/*
 * Drain the software send queue: map each frame to its h/w
 * queue, hand it to the hardware and periodically poke the
 * firmware.  Caller must hold the softc lock.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host.  So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}

/*
 * Transmit a raw (bpf-injected or management) frame.  The mbuf
 * is consumed on all paths.  Returns 0, ENETDOWN, ENOBUFS or EIO.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host.  So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}

/*
 * Media change callback; only a fixed-rate change needs action
 * (push the new rates to the firmware), no full reset required.
 */
static int
mwl_media_change(struct ifnet *ifp)
{
	struct ieee80211vap *vap = ifp->if_softc;
	int error;

	error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	if (error == ENETRESET) {
		mwl_setrates(vap);
		error = 0;
	}
	return error;
}

#ifdef MWL_DEBUG
/*
 * Debug helper: dump a HAL key record (cipher, key bytes, TKIP
 * MIC keys, flags) to the console.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
#endif

/*
 * Allocate a key cache slot for a unicast key.  The
 * firmware handles key allocation and every station is
 * guaranteed key space so we are always successful.
 */
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;

	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/* give the caller what they requested */
		*keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
	} else {
		/*
		 * Firmware handles key allocation.
		 */
		*keyix = *rxkeyix = 0;
	}
	return 1;
}

/*
 * Delete a key entry allocated by mwl_key_alloc.
 */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}

/*
 * Propagate group-key tx/rx flags from the net80211 key to the
 * HAL key record; returns 1 for a group key, 0 otherwise.
 */
static __inline int
addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
{
	if (k->wk_flags & IEEE80211_KEY_GROUP) {
		if (k->wk_flags & IEEE80211_KEY_XMIT)
			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
		if (k->wk_flags & IEEE80211_KEY_RECV)
			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
		return 1;
	} else
		return 0;
}

/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by mwl_key_alloc.
 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return (_mwl_key_set(vap, k, k->wk_macaddr));
}

/*
 * Worker for mwl_key_set: build a HAL key record from the
 * net80211 key and plumb it to the appropriate sta db entry.
 * Returns 1 on success, 0 on failure.
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent AP's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS node
		 * setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}

/*
 * Set the multicast filter contents into the hardware.
 * XXX f/w has no support; just defer to the os.
 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	/* NB: disabled code below predates the ifnet conversion (stale) */
#if 0
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#endif
}

/*
 * Push promiscuous and multicast filter state to the hardware.
 * Always returns 0.
 */
static int
mwl_mode_init(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
	mwl_setmcastfilter(sc);

	return 0;
}

/*
 * Callback from the 802.11 layer after a multicast state change.
 */
static void
mwl_update_mcast(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	mwl_setmcastfilter(sc);
}

/*
 * Callback from the 802.11 layer after a promiscuous mode change.
 * Note this interface does not check the operating mode as this
 * is an internal callback and we are expected to honor the current
 * state (e.g. this is used for setting the interface in promiscuous
 * mode when operating in hostap mode to do ACS).
 */
static void
mwl_update_promisc(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
}

/*
 * Callback from the 802.11 layer to update the slot time
 * based on the current setting.  We use it to notify the
 * firmware of ERP changes and the f/w takes care of things
 * like slot time and preamble.
 */
static void
mwl_updateslot(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if (!sc->sc_running)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}

/*
 * Setup the beacon frame.
 */
static int
mwl_beacon_setup(struct ieee80211vap *vap)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *ni = vap->iv_bss;
	struct mbuf *m;

	m = ieee80211_beacon_alloc(ni);
	if (m == NULL)
		return ENOBUFS;
	/* NB: the f/w keeps its own copy; free the mbuf immediately */
	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
	m_free(m);

	return 0;
}

/*
 * Update the beacon frame in response to a change.
 */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	switch (item) {
	case IEEE80211_BEACON_ERP:
		mwl_updateslot(ic);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}

/*
 * busdma load callback: stash the (single) segment's bus address
 * through the opaque arg pointer.
 */
static void
mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}

#ifdef MWL_HOST_PS_SUPPORT
/*
 * Handle power save station occupancy changes.
 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	/* only notify the f/w on 0 <-> non-zero transitions */
	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;
}

/*
 * Handle associated station power save state changes.
 */
static int
mwl_set_tim(struct ieee80211_node *ni, int set)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_vap *mvp = MWL_VAP(vap);

	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
		mwl_hal_setpowersave_sta(mvp->mv_hvap,
		    IEEE80211_AID(ni->ni_associd), set);
		return 1;
	} else
		return 0;
}
#endif /* MWL_HOST_PS_SUPPORT */

/*
 * Allocate and map a contiguous, zeroed DMA descriptor area of
 * nbuf*ndesc*descsize bytes, recording tag/map/vaddr/paddr in dd.
 * Returns 0 or an errno; on failure any partial setup is undone
 * and dd is cleared.
 */
static int
mwl_desc_setup(struct mwl_softc *sc, const char *name,
	struct mwl_descdma *dd,
	int nbuf, size_t bufsize, int ndesc, size_t descsize)
{
	uint8_t *ds;
	int error;

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
	    __func__, name, nbuf, (uintmax_t) bufsize,
	    ndesc, (uintmax_t) descsize);

	dd->dd_name = name;
	dd->dd_desc_len = nbuf * ndesc * descsize;

	/*
	 * Setup DMA descriptor area.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				mwl_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = dd->dd_desc;
	memset(ds, 0, dd->dd_desc_len);
	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	return 0;
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
	/* NB: stale #undef; DS2PHYS is defined further down in the file */
#undef DS2PHYS
}

/*
 * Undo mwl_desc_setup: unload, free and destroy the DMA
 * resources and clear the bookkeeping structure.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}

/*
 * Construct a tx q's free list.  The order of entries on
 * the list must reflect the physical layout of tx descriptors
 * because the firmware pre-fetches descriptors.
 *
 * XXX might be better to use indices into the buffer array.
 */
static void
mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i;

	bf = txq->dma.dd_bufptr;
	STAILQ_INIT(&txq->free);
	for (i = 0; i < mwl_txbuf; i++, bf++)
		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
	txq->nfree = i;
}

/* translate a descriptor's kva to its bus address within the dma area */
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))

/*
 * Allocate a tx queue's descriptor area and its array of tx
 * buffers, create a dmamap per buffer and build the free list.
 * Returns 0 or an errno.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			/*
			 * NOTE(review): resources allocated so far are not
			 * released here; presumably the caller invokes
			 * mwl_txdma_cleanup on failure — verify.
			 */
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}

/*
 * Undo mwl_txdma_setup: destroy per-buffer dmamaps, free the
 * buffer array and release the descriptor area.  Asserts that
 * no buffer still holds an mbuf or node reference.
 */
static void
mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i;

	bf = txq->dma.dd_bufptr;
	for (i = 0; i < mwl_txbuf; i++, bf++) {
		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
		KASSERT(bf->bf_node == NULL, ("node on free list"));
		if (bf->bf_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
	}
	STAILQ_INIT(&txq->free);
	txq->nfree = 0;
	if (txq->dma.dd_bufptr != NULL) {
		free(txq->dma.dd_bufptr, M_MWLDEV);
		txq->dma.dd_bufptr = NULL;
	}
	if (txq->dma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &txq->dma);
}

/*
 * Allocate the rx descriptor area and the private pool of jumbo
 * receive buffers (see comment below).  Returns 0 or an errno.
 */
static int
mwl_rxdma_setup(struct mwl_softc *sc)
{
	int error, jumbosize, bsize, i;
	struct mwl_rxbuf *bf;
	struct mwl_jumbo *rbuf;
	struct mwl_rxdesc *ds;
	caddr_t data;

	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
			mwl_rxdesc, sizeof(struct mwl_rxbuf),
			1, sizeof(struct mwl_rxdesc));
	if (error != 0)
		return error;

	/*
	 * Receive is done to a private pool of jumbo buffers.
	 * This allows us to attach to mbuf's and avoid re-mapping
	 * memory on each rx we post.  We allocate a large chunk
	 * of memory and manage it in the driver.  The mbuf free
	 * callback method is used to reclaim frames after sending
	 * them up the stack.  By default we allocate 2x the number of
	 * rx descriptors configured so we have some slop to hold
	 * us while frames are processed.
2112 */ 2113 if (mwl_rxbuf < 2*mwl_rxdesc) { 2114 device_printf(sc->sc_dev, 2115 "too few rx dma buffers (%d); increasing to %d\n", 2116 mwl_rxbuf, 2*mwl_rxdesc); 2117 mwl_rxbuf = 2*mwl_rxdesc; 2118 } 2119 jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE); 2120 sc->sc_rxmemsize = mwl_rxbuf*jumbosize; 2121 2122 error = bus_dma_tag_create(sc->sc_dmat, /* parent */ 2123 PAGE_SIZE, 0, /* alignment, bounds */ 2124 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2125 BUS_SPACE_MAXADDR, /* highaddr */ 2126 NULL, NULL, /* filter, filterarg */ 2127 sc->sc_rxmemsize, /* maxsize */ 2128 1, /* nsegments */ 2129 sc->sc_rxmemsize, /* maxsegsize */ 2130 BUS_DMA_ALLOCNOW, /* flags */ 2131 NULL, /* lockfunc */ 2132 NULL, /* lockarg */ 2133 &sc->sc_rxdmat); 2134 if (error != 0) { 2135 device_printf(sc->sc_dev, "could not create rx DMA tag\n"); 2136 return error; 2137 } 2138 2139 error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem, 2140 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2141 &sc->sc_rxmap); 2142 if (error != 0) { 2143 device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n", 2144 (uintmax_t) sc->sc_rxmemsize); 2145 return error; 2146 } 2147 2148 error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap, 2149 sc->sc_rxmem, sc->sc_rxmemsize, 2150 mwl_load_cb, &sc->sc_rxmem_paddr, 2151 BUS_DMA_NOWAIT); 2152 if (error != 0) { 2153 device_printf(sc->sc_dev, "could not load rx DMA map\n"); 2154 return error; 2155 } 2156 2157 /* 2158 * Allocate rx buffers and set them up. 
2159 */ 2160 bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf); 2161 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO); 2162 if (bf == NULL) { 2163 device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize); 2164 return error; 2165 } 2166 sc->sc_rxdma.dd_bufptr = bf; 2167 2168 STAILQ_INIT(&sc->sc_rxbuf); 2169 ds = sc->sc_rxdma.dd_desc; 2170 for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) { 2171 bf->bf_desc = ds; 2172 bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds); 2173 /* pre-assign dma buffer */ 2174 bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); 2175 /* NB: tail is intentional to preserve descriptor order */ 2176 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 2177 } 2178 2179 /* 2180 * Place remainder of dma memory buffers on the free list. 2181 */ 2182 SLIST_INIT(&sc->sc_rxfree); 2183 for (; i < mwl_rxbuf; i++) { 2184 data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); 2185 rbuf = MWL_JUMBO_DATA2BUF(data); 2186 SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next); 2187 sc->sc_nrxfree++; 2188 } 2189 return 0; 2190 } 2191 #undef DS2PHYS 2192 2193 static void 2194 mwl_rxdma_cleanup(struct mwl_softc *sc) 2195 { 2196 if (sc->sc_rxmem_paddr != 0) { 2197 bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap); 2198 sc->sc_rxmem_paddr = 0; 2199 } 2200 if (sc->sc_rxmem != NULL) { 2201 bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap); 2202 sc->sc_rxmem = NULL; 2203 } 2204 if (sc->sc_rxdma.dd_bufptr != NULL) { 2205 free(sc->sc_rxdma.dd_bufptr, M_MWLDEV); 2206 sc->sc_rxdma.dd_bufptr = NULL; 2207 } 2208 if (sc->sc_rxdma.dd_desc_len != 0) 2209 mwl_desc_cleanup(sc, &sc->sc_rxdma); 2210 } 2211 2212 static int 2213 mwl_dma_setup(struct mwl_softc *sc) 2214 { 2215 int error, i; 2216 2217 error = mwl_rxdma_setup(sc); 2218 if (error != 0) { 2219 mwl_rxdma_cleanup(sc); 2220 return error; 2221 } 2222 2223 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) { 2224 error = mwl_txdma_setup(sc, &sc->sc_txq[i]); 2225 if (error != 0) { 2226 mwl_dma_cleanup(sc); 2227 return error; 2228 } 2229 } 2230 
return 0; 2231 } 2232 2233 static void 2234 mwl_dma_cleanup(struct mwl_softc *sc) 2235 { 2236 int i; 2237 2238 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) 2239 mwl_txdma_cleanup(sc, &sc->sc_txq[i]); 2240 mwl_rxdma_cleanup(sc); 2241 } 2242 2243 static struct ieee80211_node * 2244 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2245 { 2246 struct ieee80211com *ic = vap->iv_ic; 2247 struct mwl_softc *sc = ic->ic_softc; 2248 const size_t space = sizeof(struct mwl_node); 2249 struct mwl_node *mn; 2250 2251 mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 2252 if (mn == NULL) { 2253 /* XXX stat+msg */ 2254 return NULL; 2255 } 2256 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn); 2257 return &mn->mn_node; 2258 } 2259 2260 static void 2261 mwl_node_cleanup(struct ieee80211_node *ni) 2262 { 2263 struct ieee80211com *ic = ni->ni_ic; 2264 struct mwl_softc *sc = ic->ic_softc; 2265 struct mwl_node *mn = MWL_NODE(ni); 2266 2267 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n", 2268 __func__, ni, ni->ni_ic, mn->mn_staid); 2269 2270 if (mn->mn_staid != 0) { 2271 struct ieee80211vap *vap = ni->ni_vap; 2272 2273 if (mn->mn_hvap != NULL) { 2274 if (vap->iv_opmode == IEEE80211_M_STA) 2275 mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr); 2276 else 2277 mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr); 2278 } 2279 /* 2280 * NB: legacy WDS peer sta db entry is installed using 2281 * the associate ap's hvap; use it again to delete it. 2282 * XXX can vap be NULL? 2283 */ 2284 else if (vap->iv_opmode == IEEE80211_M_WDS && 2285 MWL_VAP(vap)->mv_ap_hvap != NULL) 2286 mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap, 2287 ni->ni_macaddr); 2288 delstaid(sc, mn->mn_staid); 2289 mn->mn_staid = 0; 2290 } 2291 sc->sc_node_cleanup(ni); 2292 } 2293 2294 /* 2295 * Reclaim rx dma buffers from packets sitting on the ampdu 2296 * reorder queue for a station. We replace buffers with a 2297 * system cluster (if available). 
 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
	/*
	 * NOTE(review): the entire body is compiled out; the disabled
	 * code uses pool_cache_get_paddr/MEXTREMOVE/MEXTADD which are
	 * not FreeBSD APIs (presumably a NetBSD-era leftover) — confirm
	 * before ever enabling.  As built, this function is a no-op.
	 */
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}

/*
 * Callback to reclaim resources.  We first let the
 * net80211 layer do it's thing, then if we are still
 * blocked by a lack of rx dma buffers we walk the ampdu
 * reorder q's to reclaim buffers by copying to a system
 * cluster.
 */
static void
mwl_node_drain(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
	    __func__, ni, ni->ni_vap, mn->mn_staid);

	/* NB: call up first to age out ampdu q's */
	sc->sc_node_drain(ni);

	/* XXX better to not check low water mark? */
	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
	    (ni->ni_flags & IEEE80211_NODE_HT)) {
		uint8_t tid;
		/*
		 * Walk the reorder q and reclaim rx dma buffers by copying
		 * the packet contents into clusters.
		 */
		for (tid = 0; tid < WME_NUM_TID; tid++) {
			struct ieee80211_rx_ampdu *rap;

			rap = &ni->ni_rx_ampdu[tid];
			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
				continue;
			if (rap->rxa_qframes)
				mwl_ampdu_rxdma_reclaim(rap);
		}
	}
}

/*
 * Report per-node signal state: rssi via the saved net80211 method;
 * noise floor is a fixed -95 dBm placeholder (the antenna-info path
 * that would supply a measured value is compiled out).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}

/*
 * Convert Hardware per-antenna rssi info to common format:
 * Let a1, a2, a3 represent the amplitudes per chain
 * Let amax represent max[a1, a2, a3]
 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
 * maintain some extra precision.
 *
 * Values are stored in .5 db format capped at 127.
 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
	/* CVT: scale chain rssi relative to the strongest chain (see above) */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	/* NB: rsvd1 holds the composite rssi cached by mwl_rx_proc */
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain (amax in the formula above) */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}

/*
 * Take a jumbo rx dma buffer off the free pool.
 * Returns the data pointer or NULL if the pool is empty
 * (counted in mst_rx_nodmabuf).
 */
static __inline void *
mwl_getrxdma(struct mwl_softc *sc)
{
	struct mwl_jumbo *buf;
	void *data;

	/*
	 * Allocate from jumbo pool.
	 */
	MWL_RXFREE_LOCK(sc);
	buf = SLIST_FIRST(&sc->sc_rxfree);
	if (buf == NULL) {
		DPRINTF(sc, MWL_DEBUG_ANY,
		    "%s: out of rx dma buffers\n", __func__);
		sc->sc_stats.mst_rx_nodmabuf++;
		data = NULL;
	} else {
		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
		sc->sc_nrxfree--;
		data = MWL_JUMBO_BUF2DATA(buf);
	}
	MWL_RXFREE_UNLOCK(sc);
	return data;
}

/*
 * Return a jumbo rx dma buffer (by its data pointer) to the
 * free pool.  Inverse of mwl_getrxdma.
 */
static __inline void
mwl_putrxdma(struct mwl_softc *sc, void *data)
{
	struct mwl_jumbo *buf;

	/* XXX bounds check data */
	MWL_RXFREE_LOCK(sc);
	buf = MWL_JUMBO_DATA2BUF(data);
	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
	sc->sc_nrxfree++;
	MWL_RXFREE_UNLOCK(sc);
}

/*
 * (Re)initialize an rx descriptor and hand it to the firmware.
 * If the buffer slot is empty a replacement dma buffer is fetched;
 * when none is available the descriptor is marked OS-owned so the
 * firmware skips it and ENOMEM is returned.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* hand ownership back to the dma engine last */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * External-storage free callback for rx mbufs: return the jumbo
 * dma buffer to the pool and, if rx was throttled for lack of
 * buffers, re-enable rx interrupts once the pool refills past
 * mwl_rxdmalow.
 */
static void
mwl_ext_free(struct mbuf *m)
{
	struct mwl_softc *sc = m->m_ext.ext_arg1;

	/* XXX bounds check data */
	mwl_putrxdma(sc, m->m_ext.ext_buf);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NB: we know we are called at splvm which is above splnet.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}

/* 802.11 BlockAckReq frame header (through the TA field). */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];
	u_int8_t	i_dur[2];
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];
	/* ctl, seq, FCS */
} __packed;

/*
 * Like ieee80211_anyhdrsize, but handles BAR frames
 * specially so the logic below to piece the 802.11
 * header together works.
 */
static __inline int
mwl_anyhdrsize(const void *data)
{
	const struct ieee80211_frame *wh = data;

	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
		case IEEE80211_FC0_SUBTYPE_CTS:
		case IEEE80211_FC0_SUBTYPE_ACK:
			return sizeof(struct ieee80211_frame_ack);
		case IEEE80211_FC0_SUBTYPE_BAR:
			return sizeof(struct mwl_frame_bar);
		}
		return sizeof(struct ieee80211_frame_min);
	} else
		return ieee80211_hdrsize(data);
}

/*
 * Notify net80211 of a TKIP MIC failure.  The frame header starts
 * sizeof(uint16_t) into the rx dma buffer (hw prepends a 16-bit field).
 */
static void
mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
{
	const struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
	if (ni != NULL) {
		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
		ieee80211_free_node(ni);
	}
}

/*
 * Convert hardware signal strength to rssi.  The value
 * provided by the device has the noise floor added in;
 * we need to compensate for this but we don't have that
 * so we use a fixed value.
 *
 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
 * offset is already set as part of the initial gain.  This
 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
 */
static __inline int
cvtrssi(uint8_t ssi)
{
	int rssi = (int) ssi + 8;
	/* XXX hack guess until we have a real noise floor */
	rssi = 2*(87 - rssi);		/* NB: .5 dBm units */
	/* clamp to the 0..127 range net80211 expects */
	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
}

/*
 * Rx taskqueue handler: process up to mwl_rxquota DMA-owned rx
 * descriptors.  For each frame: check decrypt status, attach the
 * jumbo dma buffer to an mbuf (replacing it in the descriptor from
 * the free pool), reconstruct the 802.11 header, and dispatch to
 * net80211.  If the buffer pool empties, rx interrupts are masked
 * until mwl_ext_free refills it.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_qosframe_addr4 *wh4;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the hw */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
		    EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh)) {
			if (IEEE80211_IS_DSTODS(wh)) {
				wh4 = mtod(m,
				    struct ieee80211_qosframe_addr4*);
				*(uint16_t *)wh4->i_qos = ds->QosCtrl;
			} else {
				*(uint16_t *)wh->i_qos = ds->QosCtrl;
			}
		}
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}

/*
 * Initialize a tx queue's lock and link its descriptors into a
 * circular chain (pPhysNext of the last points back to the first),
 * following the free-list order established by mwl_txq_reset.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}

/*
 * Setup a hardware data transmit queue for the specified
 * access control.  We record the mapping from ac's
 * to h/w queues for use by mwl_tx_start.
 */
static int
mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
{
	struct mwl_txq *txq;

	if (ac >= nitems(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, nitems(sc->sc_ac2q));
		return 0;
	}
	if (mvtype >= MWL_NUM_TX_QUEUES) {
		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
			mvtype, MWL_NUM_TX_QUEUES);
		return 0;
	}
	txq = &sc->sc_txq[mvtype];
	mwl_txq_init(sc, txq, mvtype);
	sc->sc_ac2q[ac] = txq;
	return 1;
}

/*
 * Update WME parameters for a transmit queue.
 */
static int
mwl_txq_update(struct mwl_softc *sc, int ac)
{
	/* convert a log2 contention-window exponent to the actual CW value */
#define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
	struct ieee80211com *ic = &sc->sc_ic;
	struct chanAccParams chp;
	struct mwl_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep;
	struct mwl_hal *mh = sc->sc_mh;
	int aifs, cwmin, cwmax, txoplim;

	ieee80211_wme_ic_getparams(ic, &chp);
	wmep = &chp.cap_wmeParams[ac];

	aifs = wmep->wmep_aifsn;
	/* XXX in sta mode need to pass log values for cwmin/max */
	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	txoplim = wmep->wmep_txopLimit;	/* NB: units of 32us */

	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
		device_printf(sc->sc_dev, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;	/* NB: 0 == failure for this function */
	}
	return 1;
#undef MWL_EXPONENT_TO_VALUE
}

/*
 * Callback from the 802.11 layer to update WME parameters.
 * Returns 0 on success or EIO if any queue update failed
 * (mwl_txq_update returns 0 on failure).
 */
static int
mwl_wme_update(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	return !mwl_txq_update(sc, WME_AC_BE) ||
	    !mwl_txq_update(sc, WME_AC_BK) ||
	    !mwl_txq_update(sc, WME_AC_VI) ||
	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
}

/*
 * Reclaim resources for a setup queue.
 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	MWL_TXQ_LOCK_DESTROY(txq);
}

/*
 * Reclaim all tx queue resources.
 */
static void
mwl_tx_cleanup(struct mwl_softc *sc)
{
	int i;

	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
}

/*
 * DMA-map an outbound mbuf chain into bf->bf_segs.  If the chain
 * needs more than MWL_TXDESC segments it is collapsed/defragged
 * and re-mapped.  On success the map is PREWRITE-synced and the
 * mbuf ownership recorded in bf->bf_m; on failure the mbuf is
 * freed and an errno returned.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Map a legacy IEEE rate (units of 0.5 Mb/s) to the firmware rate
 * index written in the tx descriptor; unknown rates map to 0.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	switch (rate) {
	case 2:	 return 0;
	case 4:	 return 1;
	case 11: return 2;
	case 22: return 3;
	case 44: return 4;
	case 12: return 5;
	case 18: return 6;
	case 24: return 7;
	case 36: return 8;
	case 48: return 9;
	case 72: return 10;
	case 96: return 11;
	case 108:return 12;
	}
	return 0;
}

/*
 * Calculate fixed tx rate information per client state;
 * this value is suitable for writing to the Format field
 * of a tx descriptor.
 */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}

/*
 * Prepare a frame for transmit on the given queue: apply crypto
 * encap if needed, prepend the firmware tx record, map for DMA,
 * fill the tx descriptor and hand it to the firmware under the
 * queue lock.  Consumes m0 on all error paths (directly or via
 * mwl_tx_dmasetup).  Returns 0 or an errno.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		/* NB: QoS control field is already little-endian on the wire */
		if (IEEE80211_IS_DSTODS(wh)) {
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
			copyhdrlen -= sizeof(qos);
		} else
			qos = *(uint16_t *)
			    (((struct ieee80211_qosframe *) wh)->i_qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route the frame to the tx queue of a matching BA
			 * stream, if any; otherwise use the default queue.
			 * NB: EAPOL frames will never have qos set.
			 */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* NB: ownership transfers to the firmware at this point */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_tx_timer = 5;	/* watchdog arm (ticks/seconds per sc usage) */
	MWL_TXQ_UNLOCK(txq);

	return 0;
}

/*
 * Inverse of mwl_cvtlegacyrate: map a firmware legacy rate index
 * back to an IEEE rate (units of 0.5 Mb/s); out-of-range yields 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	return (rix < nitems(ieeerates) ? ieeerates[rix] : 0);
}

/*
 * Process completed xmit descriptors from the specified queue.
 */
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
#define	EAGLE_TXD_STATUS_MCAST \
	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;
	struct ieee80211_node *ni;
	struct mwl_node *an;
	int nreaped;
	uint32_t status;

	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
	for (nreaped = 0;; nreaped++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the firmware */
		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);

#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
			mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
		ni = bf->bf_node;
		if (ni != NULL) {
			an = MWL_NODE(ni);
			status = le32toh(ds->Status);
			if (status & EAGLE_TXD_STATUS_OK) {
				uint16_t Format = le16toh(ds->Format);
				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);

				sc->sc_stats.mst_ant_tx[txant]++;
				if (status & EAGLE_TXD_STATUS_OK_RETRY)
					sc->sc_stats.mst_tx_retries++;
				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
					sc->sc_stats.mst_tx_mretries++;
				if (txq->qnum >= MWL_WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* record the rate the fw actually used */
				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
					ni->ni_txrate = mwl_cvtlegacyrix(
					    ni->ni_txrate);
				} else
					ni->ni_txrate |= IEEE80211_RATE_MCS;
				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
			} else {
				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
					sc->sc_stats.mst_tx_linkerror++;
				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
					sc->sc_stats.mst_tx_xretries++;
				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
					sc->sc_stats.mst_tx_aging++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.mst_ff_txerr++;
			}
			if (bf->bf_m->m_flags & M_TXCB)
				/* XXX strip fw len in case header inspected */
				m_adj(bf->bf_m, sizeof(uint16_t));
			ieee80211_tx_complete(ni, bf->bf_m,
			    (status & EAGLE_TXD_STATUS_OK) == 0);
		} else
			m_freem(bf->bf_m);
		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);

		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		mwl_puttxbuf_tail(txq, bf);
	}
	return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3.
 */
static void
mwl_tx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	int nreaped;

	/*
	 * Process each active queue.
	 */
	nreaped = 0;
	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);

	if (nreaped != 0) {
		sc->sc_tx_timer = 0;	/* made progress; disarm watchdog */
		if (mbufq_first(&sc->sc_snd) != NULL) {
			/* NB: kick fw; the tx thread may have been preempted */
			mwl_hal_txstart(sc->sc_mh, 0);
			mwl_start(sc);
		}
	}
}

/*
 * Drain all frames queued on a single tx queue, reclaiming node
 * references and mbufs and returning the buffers to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
			    bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
mwl_draintxq(struct mwl_softc *sc)
{
	int i;

	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
	sc->sc_tx_timer = 0;	/* nothing in flight; disarm watchdog */
}

#ifdef MWL_DIAGAPI
/*
 * Reset the transmit queues to a pristine state after a fw download.
 */
static void
mwl_resettxq(struct mwl_softc *sc)
{
	int i;

	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
		mwl_txq_reset(sc, &sc->sc_txq[i]);
}
#endif /* MWL_DIAGAPI */

/*
 * Clear the transmit queues of any frames submitted for the
 * specified vap.  This is done when the vap is deleted so we
 * don't potentially reference the vap after it is gone.
 * Note we cannot remove the frames; we only reclaim the node
 * reference.
 */
static void
mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
{
	struct mwl_txq *txq;
	struct mwl_txbuf *bf;
	int i;

	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
		txq = &sc->sc_txq[i];
		MWL_TXQ_LOCK(txq);
		STAILQ_FOREACH(bf, &txq->active, bf_list) {
			struct ieee80211_node *ni = bf->bf_node;
			if (ni != NULL && ni->ni_vap == vap) {
				bf->bf_node = NULL;
				ieee80211_free_node(ni);
			}
		}
		MWL_TXQ_UNLOCK(txq);
	}
}

/*
 * Intercept HT MIMO power-save action frames so the setting can be
 * pushed to the firmware; everything else goes to the saved net80211
 * handler.
 */
static int
mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
	const uint8_t *frm, const uint8_t *efrm)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	const struct ieee80211_action *ia;

	ia = (const struct ieee80211_action *) frm;
	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
		const struct ieee80211_action_ht_mimopowersave *mps =
		    (const struct ieee80211_action_ht_mimopowersave *) ia;

		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
		return 0;
	} else
		return sc->sc_recv_action(ni, wh, frm, efrm);
}

/*
 * ADDBA request hook: reserve a firmware BA stream slot for the
 * node/TID before letting net80211 send the request.  Returns 0
 * (declining aggregation) when no slot or stream is available.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}

/*
 * ADDBA response hook: on success commit the pre-allocated BA
 * stream in the firmware; on failure (or peer NAK) release it.
 * Always chains to the saved net80211 handler.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}

/*
 * ADDBA teardown hook: release the firmware BA stream (if any)
 * then chain to the saved net80211 handler.
 */
static void
mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas != NULL) {
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
		    __func__, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	sc->sc_addba_stop(ni, tap);
}

/*
 * Setup the rx data structures.  This should only be
 * done once or we may get out of sync with the firmware.
3731 */ 3732 static int 3733 mwl_startrecv(struct mwl_softc *sc) 3734 { 3735 if (!sc->sc_recvsetup) { 3736 struct mwl_rxbuf *bf, *prev; 3737 struct mwl_rxdesc *ds; 3738 3739 prev = NULL; 3740 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 3741 int error = mwl_rxbuf_init(sc, bf); 3742 if (error != 0) { 3743 DPRINTF(sc, MWL_DEBUG_RECV, 3744 "%s: mwl_rxbuf_init failed %d\n", 3745 __func__, error); 3746 return error; 3747 } 3748 if (prev != NULL) { 3749 ds = prev->bf_desc; 3750 ds->pPhysNext = htole32(bf->bf_daddr); 3751 } 3752 prev = bf; 3753 } 3754 if (prev != NULL) { 3755 ds = prev->bf_desc; 3756 ds->pPhysNext = 3757 htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr); 3758 } 3759 sc->sc_recvsetup = 1; 3760 } 3761 mwl_mode_init(sc); /* set filters, etc. */ 3762 return 0; 3763 } 3764 3765 static MWL_HAL_APMODE 3766 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan) 3767 { 3768 MWL_HAL_APMODE mode; 3769 3770 if (IEEE80211_IS_CHAN_HT(chan)) { 3771 if (vap->iv_flags_ht & IEEE80211_FHT_PUREN) 3772 mode = AP_MODE_N_ONLY; 3773 else if (IEEE80211_IS_CHAN_5GHZ(chan)) 3774 mode = AP_MODE_AandN; 3775 else if (vap->iv_flags & IEEE80211_F_PUREG) 3776 mode = AP_MODE_GandN; 3777 else 3778 mode = AP_MODE_BandGandN; 3779 } else if (IEEE80211_IS_CHAN_ANYG(chan)) { 3780 if (vap->iv_flags & IEEE80211_F_PUREG) 3781 mode = AP_MODE_G_ONLY; 3782 else 3783 mode = AP_MODE_MIXED; 3784 } else if (IEEE80211_IS_CHAN_B(chan)) 3785 mode = AP_MODE_B_ONLY; 3786 else if (IEEE80211_IS_CHAN_A(chan)) 3787 mode = AP_MODE_A_ONLY; 3788 else 3789 mode = AP_MODE_MIXED; /* XXX should not happen? */ 3790 return mode; 3791 } 3792 3793 static int 3794 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan) 3795 { 3796 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; 3797 return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan)); 3798 } 3799 3800 /* 3801 * Set/change channels. 
 */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211com *ic = &sc->sc_ic;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: regpower is in dBm, ic_txpowlimit in 0.5 dBm units */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */

	return 0;
}

/* net80211 scan-start hook; the firmware needs no explicit action here */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}

/* net80211 scan-end hook; the firmware needs no explicit action here */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}

/* net80211 channel-change hook; switch hardware to ic_curchan */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	(void) mwl_chan_set(sc, ic->ic_curchan);
}

/*
 * Handle a channel switch request.  We inform the firmware
 * and mark the global state to suppress various actions.
 * NB: we issue only one request to the fw; we may be called
 *     multiple times if there are multiple vap's.
 */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_softc;
	MWL_HAL_CHANNEL hchan;

	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}

/*
 * Plumb any static WEP key for the station.
This is
 * necessary as we must propagate the key from the
 * global key table of the vap to each sta db entry.
 */
static void
mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/* only when plain WEP privacy (not WPA) with a valid default txkey */
	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
		IEEE80211_F_PRIVACY &&
	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
				    mac);
}

/*
 * Create/update the firmware station db entry for a peer and
 * plumb any static WEP key.  Returns the mwl_hal_newstation
 * error code.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}

/*
 * Re-plumb every valid key from the vap's global key table;
 * used after operations that clobber the firmware crypto state.
 */
static void
mwl_setglobalkeys(struct ieee80211vap *vap)
{
	struct ieee80211_key *wk;

	wk = &vap->iv_nw_keys[0];
	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
}

/*
 * Convert a legacy rate set to a firmware bitmask.
 */
static uint32_t
get_rate_bitmap(const struct ieee80211_rateset *rs)
{
	uint32_t rates;
	int i;

	rates = 0;
	for (i = 0; i < rs->rs_nrates; i++)
		/* NB: rates are IEEE values in units of 0.5 Mb/s */
		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
		case 2:	  rates |= 0x001; break;
		case 4:	  rates |= 0x002; break;
		case 11:  rates |= 0x004; break;
		case 22:  rates |= 0x008; break;
		case 44:  rates |= 0x010; break;
		case 12:  rates |= 0x020; break;
		case 18:  rates |= 0x040; break;
		case 24:  rates |= 0x080; break;
		case 36:  rates |= 0x100; break;
		case 48:  rates |= 0x200; break;
		case 72:  rates |= 0x400; break;
		case 96:  rates |= 0x800; break;
		case 108: rates |= 0x1000; break;
		}
	return rates;
}

/*
 * Construct an HT firmware bitmask from an HT rate set.
 */
static uint32_t
get_htrate_bitmap(const struct ieee80211_htrateset *rs)
{
	uint32_t rates;
	int i;

	rates = 0;
	for (i = 0; i < rs->rs_nrates; i++) {
		/* NB: only MCS 0-15 are representable in the bitmap */
		if (rs->rs_rates[i] < 16)
			rates |= 1<<rs->rs_rates[i];
	}
	return rates;
}

/*
 * Craft station database entry for station.
 * NB: use host byte order here, the hal handles byte swapping.
 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
		pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		if (ni->ni_chw != 40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}

/*
 * Re-create the local sta db entry for a vap to ensure
 * up to date WME state is pushed to the firmware.  Because
 * this resets crypto state this must be followed by a
 * reload of any keys in the global key table.
 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		bss = vap->iv_bss;
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
4070 WME(bss->ni_ies.wme_ie)->wme_info : 0); 4071 if (error == 0) 4072 mwl_setglobalkeys(vap); 4073 break; 4074 case IEEE80211_M_HOSTAP: 4075 case IEEE80211_M_MBSS: 4076 error = mwl_hal_newstation(hvap, vap->iv_myaddr, 4077 0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0); 4078 if (error == 0) 4079 mwl_setglobalkeys(vap); 4080 break; 4081 default: 4082 error = 0; 4083 break; 4084 } 4085 return error; 4086 #undef WME 4087 } 4088 4089 static int 4090 mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4091 { 4092 struct mwl_vap *mvp = MWL_VAP(vap); 4093 struct mwl_hal_vap *hvap = mvp->mv_hvap; 4094 struct ieee80211com *ic = vap->iv_ic; 4095 struct ieee80211_node *ni = NULL; 4096 struct mwl_softc *sc = ic->ic_softc; 4097 struct mwl_hal *mh = sc->sc_mh; 4098 enum ieee80211_state ostate = vap->iv_state; 4099 int error; 4100 4101 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n", 4102 vap->iv_ifp->if_xname, __func__, 4103 ieee80211_state_name[ostate], ieee80211_state_name[nstate]); 4104 4105 callout_stop(&sc->sc_timer); 4106 /* 4107 * Clear current radar detection state. 4108 */ 4109 if (ostate == IEEE80211_S_CAC) { 4110 /* stop quiet mode radar detection */ 4111 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP); 4112 } else if (sc->sc_radarena) { 4113 /* stop in-service radar detection */ 4114 mwl_hal_setradardetection(mh, DR_DFS_DISABLE); 4115 sc->sc_radarena = 0; 4116 } 4117 /* 4118 * Carry out per-state actions before doing net80211 work. 4119 */ 4120 if (nstate == IEEE80211_S_INIT) { 4121 /* NB: only ap+sta vap's have a fw entity */ 4122 if (hvap != NULL) 4123 mwl_hal_stop(hvap); 4124 } else if (nstate == IEEE80211_S_SCAN) { 4125 mwl_hal_start(hvap); 4126 /* NB: this disables beacon frames */ 4127 mwl_hal_setinframode(hvap); 4128 } else if (nstate == IEEE80211_S_AUTH) { 4129 /* 4130 * Must create a sta db entry in case a WEP key needs to 4131 * be plumbed. 
This entry will be overwritten if we 4132 * associate; otherwise it will be reclaimed on node free. 4133 */ 4134 ni = vap->iv_bss; 4135 MWL_NODE(ni)->mn_hvap = hvap; 4136 (void) mwl_peerstadb(ni, 0, 0, NULL); 4137 } else if (nstate == IEEE80211_S_CSA) { 4138 /* XXX move to below? */ 4139 if (vap->iv_opmode == IEEE80211_M_HOSTAP || 4140 vap->iv_opmode == IEEE80211_M_MBSS) 4141 mwl_startcsa(vap); 4142 } else if (nstate == IEEE80211_S_CAC) { 4143 /* XXX move to below? */ 4144 /* stop ap xmit and enable quiet mode radar detection */ 4145 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START); 4146 } 4147 4148 /* 4149 * Invoke the parent method to do net80211 work. 4150 */ 4151 error = mvp->mv_newstate(vap, nstate, arg); 4152 4153 /* 4154 * Carry out work that must be done after net80211 runs; 4155 * this work requires up to date state (e.g. iv_bss). 4156 */ 4157 if (error == 0 && nstate == IEEE80211_S_RUN) { 4158 /* NB: collect bss node again, it may have changed */ 4159 ni = vap->iv_bss; 4160 4161 DPRINTF(sc, MWL_DEBUG_STATE, 4162 "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 4163 "capinfo 0x%04x chan %d\n", 4164 vap->iv_ifp->if_xname, __func__, vap->iv_flags, 4165 ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, 4166 ieee80211_chan2ieee(ic, ic->ic_curchan)); 4167 4168 /* 4169 * Recreate local sta db entry to update WME/HT state. 4170 */ 4171 mwl_localstadb(vap); 4172 switch (vap->iv_opmode) { 4173 case IEEE80211_M_HOSTAP: 4174 case IEEE80211_M_MBSS: 4175 if (ostate == IEEE80211_S_CAC) { 4176 /* enable in-service radar detection */ 4177 mwl_hal_setradardetection(mh, 4178 DR_IN_SERVICE_MONITOR_START); 4179 sc->sc_radarena = 1; 4180 } 4181 /* 4182 * Allocate and setup the beacon frame 4183 * (and related state). 
4184 */ 4185 error = mwl_reset_vap(vap, IEEE80211_S_RUN); 4186 if (error != 0) { 4187 DPRINTF(sc, MWL_DEBUG_STATE, 4188 "%s: beacon setup failed, error %d\n", 4189 __func__, error); 4190 goto bad; 4191 } 4192 /* NB: must be after setting up beacon */ 4193 mwl_hal_start(hvap); 4194 break; 4195 case IEEE80211_M_STA: 4196 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n", 4197 vap->iv_ifp->if_xname, __func__, ni->ni_associd); 4198 /* 4199 * Set state now that we're associated. 4200 */ 4201 mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd); 4202 mwl_setrates(vap); 4203 mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold); 4204 if ((vap->iv_flags & IEEE80211_F_DWDS) && 4205 sc->sc_ndwdsvaps++ == 0) 4206 mwl_hal_setdwds(mh, 1); 4207 break; 4208 case IEEE80211_M_WDS: 4209 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n", 4210 vap->iv_ifp->if_xname, __func__, 4211 ether_sprintf(ni->ni_bssid)); 4212 mwl_seteapolformat(vap); 4213 break; 4214 default: 4215 break; 4216 } 4217 /* 4218 * Set CS mode according to operating channel; 4219 * this mostly an optimization for 5GHz. 4220 * 4221 * NB: must follow mwl_hal_start which resets csmode 4222 */ 4223 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) 4224 mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE); 4225 else 4226 mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA); 4227 /* 4228 * Start timer to prod firmware. 4229 */ 4230 if (sc->sc_ageinterval != 0) 4231 callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz, 4232 mwl_agestations, sc); 4233 } else if (nstate == IEEE80211_S_SLEEP) { 4234 /* XXX set chip in power save */ 4235 } else if ((vap->iv_flags & IEEE80211_F_DWDS) && 4236 --sc->sc_ndwdsvaps == 0) 4237 mwl_hal_setdwds(mh, 0); 4238 bad: 4239 return error; 4240 } 4241 4242 /* 4243 * Manage station id's; these are separate from AID's 4244 * as AID's may have values out of the range of possible 4245 * station id's acceptable to the firmware. 
4246 */ 4247 static int 4248 allocstaid(struct mwl_softc *sc, int aid) 4249 { 4250 int staid; 4251 4252 if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) { 4253 /* NB: don't use 0 */ 4254 for (staid = 1; staid < MWL_MAXSTAID; staid++) 4255 if (isclr(sc->sc_staid, staid)) 4256 break; 4257 } else 4258 staid = aid; 4259 setbit(sc->sc_staid, staid); 4260 return staid; 4261 } 4262 4263 static void 4264 delstaid(struct mwl_softc *sc, int staid) 4265 { 4266 clrbit(sc->sc_staid, staid); 4267 } 4268 4269 /* 4270 * Setup driver-specific state for a newly associated node. 4271 * Note that we're called also on a re-associate, the isnew 4272 * param tells us if this is the first time or not. 4273 */ 4274 static void 4275 mwl_newassoc(struct ieee80211_node *ni, int isnew) 4276 { 4277 struct ieee80211vap *vap = ni->ni_vap; 4278 struct mwl_softc *sc = vap->iv_ic->ic_softc; 4279 struct mwl_node *mn = MWL_NODE(ni); 4280 MWL_HAL_PEERINFO pi; 4281 uint16_t aid; 4282 int error; 4283 4284 aid = IEEE80211_AID(ni->ni_associd); 4285 if (isnew) { 4286 mn->mn_staid = allocstaid(sc, aid); 4287 mn->mn_hvap = MWL_VAP(vap)->mv_hvap; 4288 } else { 4289 mn = MWL_NODE(ni); 4290 /* XXX reset BA stream? */ 4291 } 4292 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n", 4293 __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid); 4294 error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni)); 4295 if (error != 0) { 4296 DPRINTF(sc, MWL_DEBUG_NODE, 4297 "%s: error %d creating sta db entry\n", 4298 __func__, error); 4299 /* XXX how to deal with error? */ 4300 } 4301 } 4302 4303 /* 4304 * Periodically poke the firmware to age out station state 4305 * (power save queues, pending tx aggregates). 
4306 */ 4307 static void 4308 mwl_agestations(void *arg) 4309 { 4310 struct mwl_softc *sc = arg; 4311 4312 mwl_hal_setkeepalive(sc->sc_mh); 4313 if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */ 4314 callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz); 4315 } 4316 4317 static const struct mwl_hal_channel * 4318 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee) 4319 { 4320 int i; 4321 4322 for (i = 0; i < ci->nchannels; i++) { 4323 const struct mwl_hal_channel *hc = &ci->channels[i]; 4324 if (hc->ieee == ieee) 4325 return hc; 4326 } 4327 return NULL; 4328 } 4329 4330 static int 4331 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 4332 int nchan, struct ieee80211_channel chans[]) 4333 { 4334 struct mwl_softc *sc = ic->ic_softc; 4335 struct mwl_hal *mh = sc->sc_mh; 4336 const MWL_HAL_CHANNELINFO *ci; 4337 int i; 4338 4339 for (i = 0; i < nchan; i++) { 4340 struct ieee80211_channel *c = &chans[i]; 4341 const struct mwl_hal_channel *hc; 4342 4343 if (IEEE80211_IS_CHAN_2GHZ(c)) { 4344 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ, 4345 IEEE80211_IS_CHAN_HT40(c) ? 4346 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); 4347 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 4348 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ, 4349 IEEE80211_IS_CHAN_HT40(c) ? 4350 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); 4351 } else { 4352 device_printf(sc->sc_dev, 4353 "%s: channel %u freq %u/0x%x not 2.4/5GHz\n", 4354 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 4355 return EINVAL; 4356 } 4357 /* 4358 * Verify channel has cal data and cap tx power. 4359 */ 4360 hc = findhalchannel(ci, c->ic_ieee); 4361 if (hc != NULL) { 4362 if (c->ic_maxpower > 2*hc->maxTxPow) 4363 c->ic_maxpower = 2*hc->maxTxPow; 4364 goto next; 4365 } 4366 if (IEEE80211_IS_CHAN_HT40(c)) { 4367 /* 4368 * Look for the extension channel since the 4369 * hal table only has the primary channel. 
4370 */ 4371 hc = findhalchannel(ci, c->ic_extieee); 4372 if (hc != NULL) { 4373 if (c->ic_maxpower > 2*hc->maxTxPow) 4374 c->ic_maxpower = 2*hc->maxTxPow; 4375 goto next; 4376 } 4377 } 4378 device_printf(sc->sc_dev, 4379 "%s: no cal data for channel %u ext %u freq %u/0x%x\n", 4380 __func__, c->ic_ieee, c->ic_extieee, 4381 c->ic_freq, c->ic_flags); 4382 return EINVAL; 4383 next: 4384 ; 4385 } 4386 return 0; 4387 } 4388 4389 #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G) 4390 #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A) 4391 4392 static void 4393 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans, 4394 const MWL_HAL_CHANNELINFO *ci, int flags) 4395 { 4396 int i, error; 4397 4398 for (i = 0; i < ci->nchannels; i++) { 4399 const struct mwl_hal_channel *hc = &ci->channels[i]; 4400 4401 error = ieee80211_add_channel_ht40(chans, maxchans, nchans, 4402 hc->ieee, hc->maxTxPow, flags); 4403 if (error != 0 && error != ENOENT) 4404 break; 4405 } 4406 } 4407 4408 static void 4409 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, 4410 const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[]) 4411 { 4412 int i, error; 4413 4414 error = 0; 4415 for (i = 0; i < ci->nchannels && error == 0; i++) { 4416 const struct mwl_hal_channel *hc = &ci->channels[i]; 4417 4418 error = ieee80211_add_channel(chans, maxchans, nchans, 4419 hc->ieee, hc->freq, hc->maxTxPow, 0, bands); 4420 } 4421 } 4422 4423 static void 4424 getchannels(struct mwl_softc *sc, int maxchans, int *nchans, 4425 struct ieee80211_channel chans[]) 4426 { 4427 const MWL_HAL_CHANNELINFO *ci; 4428 uint8_t bands[IEEE80211_MODE_BYTES]; 4429 4430 /* 4431 * Use the channel info from the hal to craft the 4432 * channel list. Note that we pass back an unsorted 4433 * list; the caller is required to sort it for us 4434 * (if desired). 
4435 */ 4436 *nchans = 0; 4437 if (mwl_hal_getchannelinfo(sc->sc_mh, 4438 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) { 4439 memset(bands, 0, sizeof(bands)); 4440 setbit(bands, IEEE80211_MODE_11B); 4441 setbit(bands, IEEE80211_MODE_11G); 4442 setbit(bands, IEEE80211_MODE_11NG); 4443 addchannels(chans, maxchans, nchans, ci, bands); 4444 } 4445 if (mwl_hal_getchannelinfo(sc->sc_mh, 4446 MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) { 4447 memset(bands, 0, sizeof(bands)); 4448 setbit(bands, IEEE80211_MODE_11A); 4449 setbit(bands, IEEE80211_MODE_11NA); 4450 addchannels(chans, maxchans, nchans, ci, bands); 4451 } 4452 if (mwl_hal_getchannelinfo(sc->sc_mh, 4453 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) 4454 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG); 4455 if (mwl_hal_getchannelinfo(sc->sc_mh, 4456 MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) 4457 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA); 4458 } 4459 4460 static void 4461 mwl_getradiocaps(struct ieee80211com *ic, 4462 int maxchans, int *nchans, struct ieee80211_channel chans[]) 4463 { 4464 struct mwl_softc *sc = ic->ic_softc; 4465 4466 getchannels(sc, maxchans, nchans, chans); 4467 } 4468 4469 static int 4470 mwl_getchannels(struct mwl_softc *sc) 4471 { 4472 struct ieee80211com *ic = &sc->sc_ic; 4473 4474 /* 4475 * Use the channel info from the hal to craft the 4476 * channel list for net80211. Note that we pass up 4477 * an unsorted list; net80211 will sort it for us. 4478 */ 4479 memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); 4480 ic->ic_nchans = 0; 4481 getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); 4482 4483 ic->ic_regdomain.regdomain = SKU_DEBUG; 4484 ic->ic_regdomain.country = CTRY_DEFAULT; 4485 ic->ic_regdomain.location = 'I'; 4486 ic->ic_regdomain.isocc[0] = ' '; /* XXX? */ 4487 ic->ic_regdomain.isocc[1] = ' '; 4488 return (ic->ic_nchans == 0 ? 
EIO : 0); 4489 } 4490 #undef IEEE80211_CHAN_HTA 4491 #undef IEEE80211_CHAN_HTG 4492 4493 #ifdef MWL_DEBUG 4494 static void 4495 mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix) 4496 { 4497 const struct mwl_rxdesc *ds = bf->bf_desc; 4498 uint32_t status = le32toh(ds->Status); 4499 4500 printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n" 4501 " STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n", 4502 ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext), 4503 le32toh(ds->pPhysBuffData), ds->RxControl, 4504 ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ? 4505 "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !", 4506 ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel, 4507 ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2)); 4508 } 4509 4510 static void 4511 mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix) 4512 { 4513 const struct mwl_txdesc *ds = bf->bf_desc; 4514 uint32_t status = le32toh(ds->Status); 4515 4516 printf("Q%u[%3u]", qnum, ix); 4517 printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr); 4518 printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", 4519 le32toh(ds->pPhysNext), 4520 le32toh(ds->PktPtr), le16toh(ds->PktLen), status, 4521 status & EAGLE_TXD_STATUS_USED ? 4522 "" : (status & 3) != 0 ? 
" *" : " !"); 4523 printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", 4524 ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl), 4525 le32toh(ds->SapPktInfo), le16toh(ds->Format)); 4526 #if MWL_TXDESC > 1 4527 printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n" 4528 , le32toh(ds->multiframes) 4529 , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1]) 4530 , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3]) 4531 , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5]) 4532 ); 4533 printf(" DATA:%08x %08x %08x %08x %08x %08x\n" 4534 , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1]) 4535 , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3]) 4536 , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5]) 4537 ); 4538 #endif 4539 #if 0 4540 { const uint8_t *cp = (const uint8_t *) ds; 4541 int i; 4542 for (i = 0; i < sizeof(struct mwl_txdesc); i++) { 4543 printf("%02x ", cp[i]); 4544 if (((i+1) % 16) == 0) 4545 printf("\n"); 4546 } 4547 printf("\n"); 4548 } 4549 #endif 4550 } 4551 #endif /* MWL_DEBUG */ 4552 4553 #if 0 4554 static void 4555 mwl_txq_dump(struct mwl_txq *txq) 4556 { 4557 struct mwl_txbuf *bf; 4558 int i = 0; 4559 4560 MWL_TXQ_LOCK(txq); 4561 STAILQ_FOREACH(bf, &txq->active, bf_list) { 4562 struct mwl_txdesc *ds = bf->bf_desc; 4563 MWL_TXDESC_SYNC(txq, ds, 4564 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4565 #ifdef MWL_DEBUG 4566 mwl_printtxbuf(bf, txq->qnum, i); 4567 #endif 4568 i++; 4569 } 4570 MWL_TXQ_UNLOCK(txq); 4571 } 4572 #endif 4573 4574 static void 4575 mwl_watchdog(void *arg) 4576 { 4577 struct mwl_softc *sc = arg; 4578 4579 callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc); 4580 if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0) 4581 return; 4582 4583 if (sc->sc_running && !sc->sc_invalid) { 4584 if (mwl_hal_setkeepalive(sc->sc_mh)) 4585 device_printf(sc->sc_dev, 4586 "transmit timeout (firmware hung?)\n"); 4587 else 4588 device_printf(sc->sc_dev, 4589 "transmit timeout\n"); 4590 #if 0 4591 
mwl_reset(sc); 4592 mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/ 4593 #endif 4594 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 4595 sc->sc_stats.mst_watchdog++; 4596 } 4597 } 4598 4599 #ifdef MWL_DIAGAPI 4600 /* 4601 * Diagnostic interface to the HAL. This is used by various 4602 * tools to do things like retrieve register contents for 4603 * debugging. The mechanism is intentionally opaque so that 4604 * it can change frequently w/o concern for compatibility. 4605 */ 4606 static int 4607 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md) 4608 { 4609 struct mwl_hal *mh = sc->sc_mh; 4610 u_int id = md->md_id & MWL_DIAG_ID; 4611 void *indata = NULL; 4612 void *outdata = NULL; 4613 u_int32_t insize = md->md_in_size; 4614 u_int32_t outsize = md->md_out_size; 4615 int error = 0; 4616 4617 if (md->md_id & MWL_DIAG_IN) { 4618 /* 4619 * Copy in data. 4620 */ 4621 indata = malloc(insize, M_TEMP, M_NOWAIT); 4622 if (indata == NULL) { 4623 error = ENOMEM; 4624 goto bad; 4625 } 4626 error = copyin(md->md_in_data, indata, insize); 4627 if (error) 4628 goto bad; 4629 } 4630 if (md->md_id & MWL_DIAG_DYN) { 4631 /* 4632 * Allocate a buffer for the results (otherwise the HAL 4633 * returns a pointer to a buffer where we can read the 4634 * results). Note that we depend on the HAL leaving this 4635 * pointer for us to use below in reclaiming the buffer; 4636 * may want to be more defensive. 
4637 */ 4638 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 4639 if (outdata == NULL) { 4640 error = ENOMEM; 4641 goto bad; 4642 } 4643 } 4644 if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) { 4645 if (outsize < md->md_out_size) 4646 md->md_out_size = outsize; 4647 if (outdata != NULL) 4648 error = copyout(outdata, md->md_out_data, 4649 md->md_out_size); 4650 } else { 4651 error = EINVAL; 4652 } 4653 bad: 4654 if ((md->md_id & MWL_DIAG_IN) && indata != NULL) 4655 free(indata, M_TEMP); 4656 if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL) 4657 free(outdata, M_TEMP); 4658 return error; 4659 } 4660 4661 static int 4662 mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md) 4663 { 4664 struct mwl_hal *mh = sc->sc_mh; 4665 int error; 4666 4667 MWL_LOCK_ASSERT(sc); 4668 4669 if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) { 4670 device_printf(sc->sc_dev, "unable to load firmware\n"); 4671 return EIO; 4672 } 4673 if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) { 4674 device_printf(sc->sc_dev, "unable to fetch h/w specs\n"); 4675 return EIO; 4676 } 4677 error = mwl_setupdma(sc); 4678 if (error != 0) { 4679 /* NB: mwl_setupdma prints a msg */ 4680 return error; 4681 } 4682 /* 4683 * Reset tx/rx data structures; after reload we must 4684 * re-start the driver's notion of the next xmit/recv. 4685 */ 4686 mwl_draintxq(sc); /* clear pending frames */ 4687 mwl_resettxq(sc); /* rebuild tx q lists */ 4688 sc->sc_rxnext = NULL; /* force rx to start at the list head */ 4689 return 0; 4690 } 4691 #endif /* MWL_DIAGAPI */ 4692 4693 static void 4694 mwl_parent(struct ieee80211com *ic) 4695 { 4696 struct mwl_softc *sc = ic->ic_softc; 4697 int startall = 0; 4698 4699 MWL_LOCK(sc); 4700 if (ic->ic_nrunning > 0) { 4701 if (sc->sc_running) { 4702 /* 4703 * To avoid rescanning another access point, 4704 * do not call mwl_init() here. Instead, 4705 * only reflect promisc mode settings. 
4706 */ 4707 mwl_mode_init(sc); 4708 } else { 4709 /* 4710 * Beware of being called during attach/detach 4711 * to reset promiscuous mode. In that case we 4712 * will still be marked UP but not RUNNING. 4713 * However trying to re-init the interface 4714 * is the wrong thing to do as we've already 4715 * torn down much of our state. There's 4716 * probably a better way to deal with this. 4717 */ 4718 if (!sc->sc_invalid) { 4719 mwl_init(sc); /* XXX lose error */ 4720 startall = 1; 4721 } 4722 } 4723 } else 4724 mwl_stop(sc); 4725 MWL_UNLOCK(sc); 4726 if (startall) 4727 ieee80211_start_all(ic); 4728 } 4729 4730 static int 4731 mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data) 4732 { 4733 struct mwl_softc *sc = ic->ic_softc; 4734 struct ifreq *ifr = data; 4735 int error = 0; 4736 4737 switch (cmd) { 4738 case SIOCGMVSTATS: 4739 mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats); 4740 #if 0 4741 /* NB: embed these numbers to get a consistent view */ 4742 sc->sc_stats.mst_tx_packets = 4743 ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS); 4744 sc->sc_stats.mst_rx_packets = 4745 ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS); 4746 #endif 4747 /* 4748 * NB: Drop the softc lock in case of a page fault; 4749 * we'll accept any potential inconsisentcy in the 4750 * statistics. The alternative is to copy the data 4751 * to a local structure. 
4752 */ 4753 return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr), 4754 sizeof (sc->sc_stats))); 4755 #ifdef MWL_DIAGAPI 4756 case SIOCGMVDIAG: 4757 /* XXX check privs */ 4758 return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr); 4759 case SIOCGMVRESET: 4760 /* XXX check privs */ 4761 MWL_LOCK(sc); 4762 error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr); 4763 MWL_UNLOCK(sc); 4764 break; 4765 #endif /* MWL_DIAGAPI */ 4766 default: 4767 error = ENOTTY; 4768 break; 4769 } 4770 return (error); 4771 } 4772 4773 #ifdef MWL_DEBUG 4774 static int 4775 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS) 4776 { 4777 struct mwl_softc *sc = arg1; 4778 int debug, error; 4779 4780 debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24); 4781 error = sysctl_handle_int(oidp, &debug, 0, req); 4782 if (error || !req->newptr) 4783 return error; 4784 mwl_hal_setdebug(sc->sc_mh, debug >> 24); 4785 sc->sc_debug = debug & 0x00ffffff; 4786 return 0; 4787 } 4788 #endif /* MWL_DEBUG */ 4789 4790 static void 4791 mwl_sysctlattach(struct mwl_softc *sc) 4792 { 4793 #ifdef MWL_DEBUG 4794 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 4795 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 4796 4797 sc->sc_debug = mwl_debug; 4798 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 4799 "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 4800 mwl_sysctl_debug, "I", "control debugging printfs"); 4801 #endif 4802 } 4803 4804 /* 4805 * Announce various information on device/driver attach. 
4806 */ 4807 static void 4808 mwl_announce(struct mwl_softc *sc) 4809 { 4810 4811 device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n", 4812 sc->sc_hwspecs.hwVersion, 4813 (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff, 4814 (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff, 4815 (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff, 4816 (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff, 4817 sc->sc_hwspecs.regionCode); 4818 sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber; 4819 4820 if (bootverbose) { 4821 int i; 4822 for (i = 0; i <= WME_AC_VO; i++) { 4823 struct mwl_txq *txq = sc->sc_ac2q[i]; 4824 device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n", 4825 txq->qnum, ieee80211_wme_acnames[i]); 4826 } 4827 } 4828 if (bootverbose || mwl_rxdesc != MWL_RXDESC) 4829 device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc); 4830 if (bootverbose || mwl_rxbuf != MWL_RXBUF) 4831 device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf); 4832 if (bootverbose || mwl_txbuf != MWL_TXBUF) 4833 device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf); 4834 if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh)) 4835 device_printf(sc->sc_dev, "multi-bss support\n"); 4836 #ifdef MWL_TX_NODROP 4837 if (bootverbose) 4838 device_printf(sc->sc_dev, "no tx drop\n"); 4839 #endif 4840 } 4841