/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware as many
 * other adapters do. Instead, at run time the adapter is brought into a
 * known state and told to load the boot firmware. The boot firmware loads
 * an init and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and hardware communicate with
 * the firmware by way of circular DMA rings in SRAM.
 *
 * There are 6 memory rings: 1 command ring, 1 RX data ring and 4 TX data
 * rings. The 4 TX data rings allow for QoS prioritization.
 *
 * The RX data ring consists of 32 DMA buffers. Two registers are used to
 * indicate how far into the ring the driver and the firmware have
 * progressed. The driver sets the initial read index (reg1) and the
 * initial write index (reg2); the firmware updates the read index (reg1)
 * on reception of a packet and fires an interrupt. The driver then
 * processes the buffers starting at reg1, indicating to the firmware which
 * buffers have been consumed by updating reg2, and allocates new memory
 * for each processed buffer as it goes.
 *
 * A similar scheme is used for the TX rings. The difference is that the
 * firmware stops processing buffers once the queue is full, and resumes
 * only after confirmation of a successful transmission (tx_done) has been
 * received.
 *
 * The command ring operates in the same manner as the TX queues.
 *
 * All communication directly with the card (e.g. EEPROM access) is classed
 * as Stage 1 communication.
 *
 * All communication with the card via the firmware is classed as Stage 2.
 * The firmware consists of 2 parts: a bootstrap firmware and a runtime
 * firmware. The bootstrap and runtime firmware images are loaded from host
 * memory via DMA to the card and then told to execute. From this point on
 * the majority of communication between the driver and the card goes via
 * the firmware.
 */
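
/*
 * Editorial sketch (not driver code): the RX-ring handshake described
 * above, reduced to its essentials.  The firmware publishes the index of
 * the last buffer it filled in the shared page; the driver walks the ring
 * up to that index, restocking each consumed buffer, and then reports its
 * own progress by writing the RX write pointer.  This is condensed from
 * wpi_notif_intr() and wpi_update_rx_ring() later in this file; error
 * handling and the firmware's off-by-one index adjustment are omitted.
 *
 *	hw = le32toh(sc->shared->next);			(firmware's index)
 *	while (sc->rxq.cur != hw) {
 *		sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;
 *		... process and restock sc->rxq.data[sc->rxq.cur] ...
 *	}
 *	WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7);  (driver's index)
 */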

#include "opt_wlan.h"
#include "opt_wpi.h"

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
#include <dev/wpi/if_wpi_debug.h>

struct wpi_ident {
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subdevice;
	const char	*name;
};

static const struct wpi_ident wpi_ident_table[] = {
	/* The below entries support ABG regardless of the subid */
	{ 0x8086, 0x4222,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	{ 0x8086, 0x4227,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	/* The below entries only support BG */
	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
	{ 0, 0, 0, NULL }
};

static int	wpi_probe(device_t);
static int	wpi_attach(device_t);
static void	wpi_radiotap_attach(struct wpi_softc *);
static void	wpi_sysctlattach(struct wpi_softc *);
static void	wpi_init_beacon(struct wpi_vap *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	wpi_vap_delete(struct ieee80211vap *);
static int	wpi_detach(device_t);
static int	wpi_shutdown(device_t);
static int	wpi_suspend(device_t);
static int	wpi_resume(device_t);
static int	wpi_nic_lock(struct wpi_softc *);
static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	wpi_dma_contig_free(struct wpi_dma_info *);
static int	wpi_alloc_shared(struct wpi_softc *);
static void	wpi_free_shared(struct wpi_softc *);
static int	wpi_alloc_fwmem(struct wpi_softc *);
static void	wpi_free_fwmem(struct wpi_softc *);
static int	wpi_alloc_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring(struct wpi_softc *);
static void	wpi_reset_rx_ring(struct wpi_softc *);
static void	wpi_free_rx_ring(struct wpi_softc *);
static int	wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
		    int);
static void
wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 160 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 162 static int wpi_read_eeprom(struct wpi_softc *, 163 uint8_t macaddr[IEEE80211_ADDR_LEN]); 164 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 165 static void wpi_read_eeprom_band(struct wpi_softc *, int); 166 static int wpi_read_eeprom_channels(struct wpi_softc *, int); 167 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 168 struct ieee80211_channel *); 169 static int wpi_setregdomain(struct ieee80211com *, 170 struct ieee80211_regdomain *, int, 171 struct ieee80211_channel[]); 172 static int wpi_read_eeprom_group(struct wpi_softc *, int); 173 static int wpi_add_node_entry_adhoc(struct wpi_softc *); 174 static void wpi_node_free(struct ieee80211_node *); 175 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 176 const uint8_t mac[IEEE80211_ADDR_LEN]); 177 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 178 static void wpi_calib_timeout(void *); 179 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 180 struct wpi_rx_data *); 181 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 182 struct wpi_rx_data *); 183 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 184 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 185 static void wpi_notif_intr(struct wpi_softc *); 186 static void wpi_wakeup_intr(struct wpi_softc *); 187 #ifdef WPI_DEBUG 188 static void wpi_debug_registers(struct wpi_softc *); 189 #endif 190 static void wpi_fatal_intr(struct wpi_softc *); 191 static void wpi_intr(void *); 192 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 193 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 194 struct ieee80211_node *); 195 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 196 struct ieee80211_node *, 197 const struct ieee80211_bpf_params *); 198 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 199 const struct ieee80211_bpf_params *); 200 static void wpi_start(struct ifnet *); 201 static void wpi_start_task(void *, int); 202 static void wpi_watchdog_rfkill(void *); 203 static void wpi_scan_timeout(void *); 204 static void wpi_tx_timeout(void *); 205 static int wpi_ioctl(struct ifnet *, u_long, caddr_t); 206 static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 207 static int wpi_mrr_setup(struct wpi_softc *); 208 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 209 static int wpi_add_broadcast_node(struct wpi_softc *, int); 210 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 211 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 212 static int wpi_updateedca(struct ieee80211com *); 213 static void wpi_set_promisc(struct wpi_softc *); 214 static void wpi_update_promisc(struct ifnet *); 215 static void wpi_update_mcast(struct ifnet *); 216 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 217 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 218 static void wpi_power_calibration(struct wpi_softc *); 219 static int wpi_set_txpower(struct wpi_softc *, int); 220 static int wpi_get_power_index(struct wpi_softc *, 221 struct wpi_power_group *, uint8_t, int, int); 222 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 223 static int 
wpi_send_btcoex(struct wpi_softc *); 224 static int wpi_send_rxon(struct wpi_softc *, int, int); 225 static int wpi_config(struct wpi_softc *); 226 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 227 struct ieee80211_channel *, uint8_t); 228 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 229 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 230 struct ieee80211_channel *); 231 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 232 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 233 static int wpi_config_beacon(struct wpi_vap *); 234 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 235 static void wpi_update_beacon(struct ieee80211vap *, int); 236 static void wpi_newassoc(struct ieee80211_node *, int); 237 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 238 static int wpi_load_key(struct ieee80211_node *, 239 const struct ieee80211_key *); 240 static void wpi_load_key_cb(void *, struct ieee80211_node *); 241 static int wpi_set_global_keys(struct ieee80211_node *); 242 static int wpi_del_key(struct ieee80211_node *, 243 const struct ieee80211_key *); 244 static void wpi_del_key_cb(void *, struct ieee80211_node *); 245 static int wpi_process_key(struct ieee80211vap *, 246 const struct ieee80211_key *, int); 247 static int wpi_key_set(struct ieee80211vap *, 248 const struct ieee80211_key *, 249 const uint8_t mac[IEEE80211_ADDR_LEN]); 250 static int wpi_key_delete(struct ieee80211vap *, 251 const struct ieee80211_key *); 252 static int wpi_post_alive(struct wpi_softc *); 253 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 254 static int wpi_load_firmware(struct wpi_softc *); 255 static int wpi_read_firmware(struct wpi_softc *); 256 static void wpi_unload_firmware(struct wpi_softc *); 257 static int wpi_clock_wait(struct wpi_softc *); 258 static int wpi_apm_init(struct wpi_softc *); 259 static void wpi_apm_stop_master(struct wpi_softc *); 260 static void wpi_apm_stop(struct wpi_softc *); 261 static void wpi_nic_config(struct wpi_softc *); 262 static int wpi_hw_init(struct wpi_softc *); 263 static void wpi_hw_stop(struct wpi_softc *); 264 static void wpi_radio_on(void *, int); 265 static void wpi_radio_off(void *, int); 266 static void wpi_init(void *); 267 static void wpi_stop_locked(struct wpi_softc *); 268 static void wpi_stop(struct wpi_softc *); 269 static void wpi_scan_start(struct ieee80211com *); 270 static void wpi_scan_end(struct ieee80211com *); 271 static void wpi_set_channel(struct ieee80211com *); 272 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 273 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 274 static void wpi_hw_reset(void *, int); 275 276 static device_method_t wpi_methods[] = { 277 /* Device interface */ 278 DEVMETHOD(device_probe, wpi_probe), 279 DEVMETHOD(device_attach, wpi_attach), 280 DEVMETHOD(device_detach, wpi_detach), 281 DEVMETHOD(device_shutdown, wpi_shutdown), 282 DEVMETHOD(device_suspend, wpi_suspend), 283 DEVMETHOD(device_resume, wpi_resume), 284 285 DEVMETHOD_END 286 }; 287 288 static driver_t wpi_driver = { 289 "wpi", 290 wpi_methods, 291 sizeof (struct wpi_softc) 292 }; 293 static devclass_t wpi_devclass; 294 295 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 296 297 MODULE_VERSION(wpi, 1); 298 299 MODULE_DEPEND(wpi, pci, 1, 1, 1); 300 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 301 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 302 303 static int 304 wpi_probe(device_t dev) 
305 { 306 const struct wpi_ident *ident; 307 308 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 309 if (pci_get_vendor(dev) == ident->vendor && 310 pci_get_device(dev) == ident->device) { 311 device_set_desc(dev, ident->name); 312 return (BUS_PROBE_DEFAULT); 313 } 314 } 315 return ENXIO; 316 } 317 318 static int 319 wpi_attach(device_t dev) 320 { 321 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 322 struct ieee80211com *ic; 323 struct ifnet *ifp; 324 int i, error, rid; 325 #ifdef WPI_DEBUG 326 int supportsa = 1; 327 const struct wpi_ident *ident; 328 #endif 329 uint8_t macaddr[IEEE80211_ADDR_LEN]; 330 331 sc->sc_dev = dev; 332 333 #ifdef WPI_DEBUG 334 error = resource_int_value(device_get_name(sc->sc_dev), 335 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 336 if (error != 0) 337 sc->sc_debug = 0; 338 #else 339 sc->sc_debug = 0; 340 #endif 341 342 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 343 344 /* 345 * Get the offset of the PCI Express Capability Structure in PCI 346 * Configuration Space. 347 */ 348 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 349 if (error != 0) { 350 device_printf(dev, "PCIe capability structure not found!\n"); 351 return error; 352 } 353 354 /* 355 * Some card's only support 802.11b/g not a, check to see if 356 * this is one such card. A 0x0 in the subdevice table indicates 357 * the entire subdevice range is to be ignored. 358 */ 359 #ifdef WPI_DEBUG 360 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 361 if (ident->subdevice && 362 pci_get_subdevice(dev) == ident->subdevice) { 363 supportsa = 0; 364 break; 365 } 366 } 367 #endif 368 369 /* Clear device-specific "PCI retry timeout" register (41h). */ 370 pci_write_config(dev, 0x41, 0, 1); 371 372 /* Enable bus-mastering. */ 373 pci_enable_busmaster(dev); 374 375 rid = PCIR_BAR(0); 376 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 377 RF_ACTIVE); 378 if (sc->mem == NULL) { 379 device_printf(dev, "can't map mem space\n"); 380 return ENOMEM; 381 } 382 sc->sc_st = rman_get_bustag(sc->mem); 383 sc->sc_sh = rman_get_bushandle(sc->mem); 384 385 i = 1; 386 rid = 0; 387 if (pci_alloc_msi(dev, &i) == 0) 388 rid = 1; 389 /* Install interrupt handler. */ 390 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 391 (rid != 0 ? 0 : RF_SHAREABLE)); 392 if (sc->irq == NULL) { 393 device_printf(dev, "can't map interrupt\n"); 394 error = ENOMEM; 395 goto fail; 396 } 397 398 WPI_LOCK_INIT(sc); 399 WPI_TX_LOCK_INIT(sc); 400 WPI_RXON_LOCK_INIT(sc); 401 WPI_NT_LOCK_INIT(sc); 402 WPI_TXQ_LOCK_INIT(sc); 403 WPI_TXQ_STATE_LOCK_INIT(sc); 404 405 /* Allocate DMA memory for firmware transfers. */ 406 if ((error = wpi_alloc_fwmem(sc)) != 0) { 407 device_printf(dev, 408 "could not allocate memory for firmware, error %d\n", 409 error); 410 goto fail; 411 } 412 413 /* Allocate shared page. */ 414 if ((error = wpi_alloc_shared(sc)) != 0) { 415 device_printf(dev, "could not allocate shared page\n"); 416 goto fail; 417 } 418 419 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 420 for (i = 0; i < WPI_NTXQUEUES; i++) { 421 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 422 device_printf(dev, 423 "could not allocate TX ring %d, error %d\n", i, 424 error); 425 goto fail; 426 } 427 } 428 429 /* Allocate RX ring. */ 430 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 431 device_printf(dev, "could not allocate RX ring, error %d\n", 432 error); 433 goto fail; 434 } 435 436 /* Clear pending interrupts. 
*/ 437 WPI_WRITE(sc, WPI_INT, 0xffffffff); 438 439 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 440 if (ifp == NULL) { 441 device_printf(dev, "can not allocate ifnet structure\n"); 442 goto fail; 443 } 444 445 ic = ifp->if_l2com; 446 ic->ic_ifp = ifp; 447 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 448 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 449 450 /* Set device capabilities. */ 451 ic->ic_caps = 452 IEEE80211_C_STA /* station mode supported */ 453 | IEEE80211_C_IBSS /* IBSS mode supported */ 454 | IEEE80211_C_HOSTAP /* Host access point mode */ 455 | IEEE80211_C_MONITOR /* monitor mode supported */ 456 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 457 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 458 | IEEE80211_C_TXPMGT /* tx power management */ 459 | IEEE80211_C_SHSLOT /* short slot time supported */ 460 | IEEE80211_C_WPA /* 802.11i */ 461 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 462 | IEEE80211_C_WME /* 802.11e */ 463 | IEEE80211_C_PMGT /* Station-side power mgmt */ 464 ; 465 466 ic->ic_cryptocaps = 467 IEEE80211_CRYPTO_AES_CCM; 468 469 /* 470 * Read in the eeprom and also setup the channels for 471 * net80211. We don't set the rates as net80211 does this for us 472 */ 473 if ((error = wpi_read_eeprom(sc, macaddr)) != 0) { 474 device_printf(dev, "could not read EEPROM, error %d\n", 475 error); 476 goto fail; 477 } 478 479 #ifdef WPI_DEBUG 480 if (bootverbose) { 481 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 482 sc->domain); 483 device_printf(sc->sc_dev, "Hardware Type: %c\n", 484 sc->type > 1 ? 'B': '?'); 485 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 486 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 487 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 488 supportsa ? "does" : "does not"); 489 490 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 491 check what sc->rev really represents - benjsc 20070615 */ 492 } 493 #endif 494 495 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 496 ifp->if_softc = sc; 497 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 498 ifp->if_init = wpi_init; 499 ifp->if_ioctl = wpi_ioctl; 500 ifp->if_start = wpi_start; 501 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 502 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 503 IFQ_SET_READY(&ifp->if_snd); 504 505 ieee80211_ifattach(ic, macaddr); 506 ic->ic_vap_create = wpi_vap_create; 507 ic->ic_vap_delete = wpi_vap_delete; 508 ic->ic_raw_xmit = wpi_raw_xmit; 509 ic->ic_node_alloc = wpi_node_alloc; 510 sc->sc_node_free = ic->ic_node_free; 511 ic->ic_node_free = wpi_node_free; 512 ic->ic_wme.wme_update = wpi_updateedca; 513 ic->ic_update_promisc = wpi_update_promisc; 514 ic->ic_update_mcast = wpi_update_mcast; 515 ic->ic_newassoc = wpi_newassoc; 516 ic->ic_scan_start = wpi_scan_start; 517 ic->ic_scan_end = wpi_scan_end; 518 ic->ic_set_channel = wpi_set_channel; 519 sc->sc_scan_curchan = ic->ic_scan_curchan; 520 ic->ic_scan_curchan = wpi_scan_curchan; 521 ic->ic_scan_mindwell = wpi_scan_mindwell; 522 ic->ic_setregdomain = wpi_setregdomain; 523 524 wpi_radiotap_attach(sc); 525 526 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 527 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 528 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 529 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 530 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 531 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 532 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 533 TASK_INIT(&sc->sc_start_task, 0, wpi_start_task, sc); 534 535 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 536 taskqueue_thread_enqueue, &sc->sc_tq); 537 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 538 if (error != 0) { 539 device_printf(dev, "can't start threads, error %d\n", error); 540 goto fail; 541 } 542 543 wpi_sysctlattach(sc); 544 545 /* 546 * Hook our interrupt after all initialization is complete. 547 */ 548 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 549 NULL, wpi_intr, sc, &sc->sc_ih); 550 if (error != 0) { 551 device_printf(dev, "can't establish interrupt, error %d\n", 552 error); 553 goto fail; 554 } 555 556 if (bootverbose) 557 ieee80211_announce(ic); 558 559 #ifdef WPI_DEBUG 560 if (sc->sc_debug & WPI_DEBUG_HW) 561 ieee80211_announce_channels(ic); 562 #endif 563 564 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 565 return 0; 566 567 fail: wpi_detach(dev); 568 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 569 return error; 570 } 571 572 /* 573 * Attach the interface to 802.11 radiotap. 
574 */ 575 static void 576 wpi_radiotap_attach(struct wpi_softc *sc) 577 { 578 struct ifnet *ifp = sc->sc_ifp; 579 struct ieee80211com *ic = ifp->if_l2com; 580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 581 ieee80211_radiotap_attach(ic, 582 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 583 WPI_TX_RADIOTAP_PRESENT, 584 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 585 WPI_RX_RADIOTAP_PRESENT); 586 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 587 } 588 589 static void 590 wpi_sysctlattach(struct wpi_softc *sc) 591 { 592 #ifdef WPI_DEBUG 593 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 594 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 595 596 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 597 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 598 "control debugging printfs"); 599 #endif 600 } 601 602 static void 603 wpi_init_beacon(struct wpi_vap *wvp) 604 { 605 struct wpi_buf *bcn = &wvp->wv_bcbuf; 606 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 607 608 cmd->id = WPI_ID_BROADCAST; 609 cmd->ofdm_mask = 0xff; 610 cmd->cck_mask = 0x0f; 611 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 612 cmd->flags = htole32(WPI_TX_AUTO_SEQ | WPI_TX_INSERT_TSTAMP); 613 614 bcn->code = WPI_CMD_SET_BEACON; 615 bcn->ac = WPI_CMD_QUEUE_NUM; 616 bcn->size = sizeof(struct wpi_cmd_beacon); 617 } 618 619 static struct ieee80211vap * 620 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 621 enum ieee80211_opmode opmode, int flags, 622 const uint8_t bssid[IEEE80211_ADDR_LEN], 623 const uint8_t mac[IEEE80211_ADDR_LEN]) 624 { 625 struct wpi_vap *wvp; 626 struct ieee80211vap *vap; 627 628 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 629 return NULL; 630 631 wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap), 632 M_80211_VAP, M_NOWAIT | M_ZERO); 633 if (wvp == NULL) 634 return NULL; 635 vap = &wvp->wv_vap; 636 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); 637 638 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 639 WPI_VAP_LOCK_INIT(wvp); 640 wpi_init_beacon(wvp); 641 } 642 643 /* Override with driver methods. */ 644 vap->iv_key_set = wpi_key_set; 645 vap->iv_key_delete = wpi_key_delete; 646 wvp->wv_newstate = vap->iv_newstate; 647 vap->iv_newstate = wpi_newstate; 648 vap->iv_update_beacon = wpi_update_beacon; 649 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 650 651 ieee80211_ratectl_init(vap); 652 /* Complete setup. 
*/ 653 ieee80211_vap_attach(vap, ieee80211_media_change, 654 ieee80211_media_status); 655 ic->ic_opmode = opmode; 656 return vap; 657 } 658 659 static void 660 wpi_vap_delete(struct ieee80211vap *vap) 661 { 662 struct wpi_vap *wvp = WPI_VAP(vap); 663 struct wpi_buf *bcn = &wvp->wv_bcbuf; 664 enum ieee80211_opmode opmode = vap->iv_opmode; 665 666 ieee80211_ratectl_deinit(vap); 667 ieee80211_vap_detach(vap); 668 669 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 670 if (bcn->m != NULL) 671 m_freem(bcn->m); 672 673 WPI_VAP_LOCK_DESTROY(wvp); 674 } 675 676 free(wvp, M_80211_VAP); 677 } 678 679 static int 680 wpi_detach(device_t dev) 681 { 682 struct wpi_softc *sc = device_get_softc(dev); 683 struct ifnet *ifp = sc->sc_ifp; 684 struct ieee80211com *ic; 685 int qid; 686 687 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 688 689 if (ifp != NULL) { 690 ic = ifp->if_l2com; 691 692 ieee80211_draintask(ic, &sc->sc_reinittask); 693 ieee80211_draintask(ic, &sc->sc_radiooff_task); 694 ieee80211_draintask(ic, &sc->sc_radioon_task); 695 ieee80211_draintask(ic, &sc->sc_start_task); 696 697 wpi_stop(sc); 698 699 taskqueue_drain_all(sc->sc_tq); 700 taskqueue_free(sc->sc_tq); 701 702 callout_drain(&sc->watchdog_rfkill); 703 callout_drain(&sc->tx_timeout); 704 callout_drain(&sc->scan_timeout); 705 callout_drain(&sc->calib_to); 706 ieee80211_ifdetach(ic); 707 } 708 709 /* Uninstall interrupt handler. */ 710 if (sc->irq != NULL) { 711 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 712 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 713 sc->irq); 714 pci_release_msi(dev); 715 } 716 717 if (sc->txq[0].data_dmat) { 718 /* Free DMA resources. */ 719 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 720 wpi_free_tx_ring(sc, &sc->txq[qid]); 721 722 wpi_free_rx_ring(sc); 723 wpi_free_shared(sc); 724 } 725 726 if (sc->fw_dma.tag) 727 wpi_free_fwmem(sc); 728 729 if (sc->mem != NULL) 730 bus_release_resource(dev, SYS_RES_MEMORY, 731 rman_get_rid(sc->mem), sc->mem); 732 733 if (ifp != NULL) 734 if_free(ifp); 735 736 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 737 WPI_TXQ_STATE_LOCK_DESTROY(sc); 738 WPI_TXQ_LOCK_DESTROY(sc); 739 WPI_NT_LOCK_DESTROY(sc); 740 WPI_RXON_LOCK_DESTROY(sc); 741 WPI_TX_LOCK_DESTROY(sc); 742 WPI_LOCK_DESTROY(sc); 743 return 0; 744 } 745 746 static int 747 wpi_shutdown(device_t dev) 748 { 749 struct wpi_softc *sc = device_get_softc(dev); 750 751 wpi_stop(sc); 752 return 0; 753 } 754 755 static int 756 wpi_suspend(device_t dev) 757 { 758 struct wpi_softc *sc = device_get_softc(dev); 759 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 760 761 ieee80211_suspend_all(ic); 762 return 0; 763 } 764 765 static int 766 wpi_resume(device_t dev) 767 { 768 struct wpi_softc *sc = device_get_softc(dev); 769 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 770 771 /* Clear device-specific "PCI retry timeout" register (41h). */ 772 pci_write_config(dev, 0x41, 0, 1); 773 774 ieee80211_resume_all(ic); 775 return 0; 776 } 777 778 /* 779 * Grab exclusive access to NIC memory. 780 */ 781 static int 782 wpi_nic_lock(struct wpi_softc *sc) 783 { 784 int ntries; 785 786 /* Request exclusive access to NIC. */ 787 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 788 789 /* Spin until we actually get the lock. 
*/ 790 for (ntries = 0; ntries < 1000; ntries++) { 791 if ((WPI_READ(sc, WPI_GP_CNTRL) & 792 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 793 WPI_GP_CNTRL_MAC_ACCESS_ENA) 794 return 0; 795 DELAY(10); 796 } 797 798 device_printf(sc->sc_dev, "could not lock memory\n"); 799 800 return ETIMEDOUT; 801 } 802 803 /* 804 * Release lock on NIC memory. 805 */ 806 static __inline void 807 wpi_nic_unlock(struct wpi_softc *sc) 808 { 809 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 810 } 811 812 static __inline uint32_t 813 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 814 { 815 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 816 WPI_BARRIER_READ_WRITE(sc); 817 return WPI_READ(sc, WPI_PRPH_RDATA); 818 } 819 820 static __inline void 821 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 822 { 823 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 824 WPI_BARRIER_WRITE(sc); 825 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 826 } 827 828 static __inline void 829 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 830 { 831 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 832 } 833 834 static __inline void 835 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 836 { 837 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 838 } 839 840 static __inline void 841 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 842 const uint32_t *data, int count) 843 { 844 for (; count > 0; count--, data++, addr += 4) 845 wpi_prph_write(sc, addr, *data); 846 } 847 848 static __inline uint32_t 849 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 850 { 851 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 852 WPI_BARRIER_READ_WRITE(sc); 853 return WPI_READ(sc, WPI_MEM_RDATA); 854 } 855 856 static __inline void 857 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 858 int count) 859 { 860 for (; count > 0; count--, addr += 4) 861 *data++ = wpi_mem_read(sc, addr); 862 } 863 864 static int 865 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 866 { 867 uint8_t *out = data; 868 uint32_t val; 869 int error, ntries; 870 871 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 872 873 if ((error = wpi_nic_lock(sc)) != 0) 874 return error; 875 876 for (; count > 0; count -= 2, addr++) { 877 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 878 for (ntries = 0; ntries < 10; ntries++) { 879 val = WPI_READ(sc, WPI_EEPROM); 880 if (val & WPI_EEPROM_READ_VALID) 881 break; 882 DELAY(5); 883 } 884 if (ntries == 10) { 885 device_printf(sc->sc_dev, 886 "timeout reading ROM at 0x%x\n", addr); 887 return ETIMEDOUT; 888 } 889 *out++= val >> 16; 890 if (count > 1) 891 *out ++= val >> 24; 892 } 893 894 wpi_nic_unlock(sc); 895 896 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 897 898 return 0; 899 } 900 901 static void 902 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 903 { 904 if (error != 0) 905 return; 906 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 907 *(bus_addr_t *)arg = segs[0].ds_addr; 908 } 909 910 /* 911 * Allocates a contiguous block of dma memory of the requested size and 912 * alignment. 
913 */ 914 static int 915 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 916 void **kvap, bus_size_t size, bus_size_t alignment) 917 { 918 int error; 919 920 dma->tag = NULL; 921 dma->size = size; 922 923 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 924 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 925 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 926 if (error != 0) 927 goto fail; 928 929 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 930 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 931 if (error != 0) 932 goto fail; 933 934 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 935 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 936 if (error != 0) 937 goto fail; 938 939 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 940 941 if (kvap != NULL) 942 *kvap = dma->vaddr; 943 944 return 0; 945 946 fail: wpi_dma_contig_free(dma); 947 return error; 948 } 949 950 static void 951 wpi_dma_contig_free(struct wpi_dma_info *dma) 952 { 953 if (dma->vaddr != NULL) { 954 bus_dmamap_sync(dma->tag, dma->map, 955 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 956 bus_dmamap_unload(dma->tag, dma->map); 957 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 958 dma->vaddr = NULL; 959 } 960 if (dma->tag != NULL) { 961 bus_dma_tag_destroy(dma->tag); 962 dma->tag = NULL; 963 } 964 } 965 966 /* 967 * Allocate a shared page between host and NIC. 968 */ 969 static int 970 wpi_alloc_shared(struct wpi_softc *sc) 971 { 972 /* Shared buffer must be aligned on a 4KB boundary. */ 973 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 974 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 975 } 976 977 static void 978 wpi_free_shared(struct wpi_softc *sc) 979 { 980 wpi_dma_contig_free(&sc->shared_dma); 981 } 982 983 /* 984 * Allocate DMA-safe memory for firmware transfer. 985 */ 986 static int 987 wpi_alloc_fwmem(struct wpi_softc *sc) 988 { 989 /* Must be aligned on a 16-byte boundary. */ 990 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 991 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 992 } 993 994 static void 995 wpi_free_fwmem(struct wpi_softc *sc) 996 { 997 wpi_dma_contig_free(&sc->fw_dma); 998 } 999 1000 static int 1001 wpi_alloc_rx_ring(struct wpi_softc *sc) 1002 { 1003 struct wpi_rx_ring *ring = &sc->rxq; 1004 bus_size_t size; 1005 int i, error; 1006 1007 ring->cur = 0; 1008 ring->update = 0; 1009 1010 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1011 1012 /* Allocate RX descriptors (16KB aligned.) */ 1013 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1014 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1015 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1016 if (error != 0) { 1017 device_printf(sc->sc_dev, 1018 "%s: could not allocate RX ring DMA memory, error %d\n", 1019 __func__, error); 1020 goto fail; 1021 } 1022 1023 /* Create RX buffer DMA tag. */ 1024 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1025 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1026 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1027 &ring->data_dmat); 1028 if (error != 0) { 1029 device_printf(sc->sc_dev, 1030 "%s: could not create RX buf DMA tag, error %d\n", 1031 __func__, error); 1032 goto fail; 1033 } 1034 1035 /* 1036 * Allocate and map RX buffers. 
1037 */ 1038 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1039 struct wpi_rx_data *data = &ring->data[i]; 1040 bus_addr_t paddr; 1041 1042 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1043 if (error != 0) { 1044 device_printf(sc->sc_dev, 1045 "%s: could not create RX buf DMA map, error %d\n", 1046 __func__, error); 1047 goto fail; 1048 } 1049 1050 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1051 if (data->m == NULL) { 1052 device_printf(sc->sc_dev, 1053 "%s: could not allocate RX mbuf\n", __func__); 1054 error = ENOBUFS; 1055 goto fail; 1056 } 1057 1058 error = bus_dmamap_load(ring->data_dmat, data->map, 1059 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1060 &paddr, BUS_DMA_NOWAIT); 1061 if (error != 0 && error != EFBIG) { 1062 device_printf(sc->sc_dev, 1063 "%s: can't map mbuf (error %d)\n", __func__, 1064 error); 1065 goto fail; 1066 } 1067 1068 /* Set physical address of RX buffer. */ 1069 ring->desc[i] = htole32(paddr); 1070 } 1071 1072 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1073 BUS_DMASYNC_PREWRITE); 1074 1075 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1076 1077 return 0; 1078 1079 fail: wpi_free_rx_ring(sc); 1080 1081 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1082 1083 return error; 1084 } 1085 1086 static void 1087 wpi_update_rx_ring(struct wpi_softc *sc) 1088 { 1089 struct wpi_rx_ring *ring = &sc->rxq; 1090 1091 if (ring->update != 0) { 1092 /* Wait for INT_WAKEUP event. */ 1093 return; 1094 } 1095 1096 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) { 1097 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1098 __func__); 1099 1100 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1101 ring->update = 1; 1102 } else 1103 WPI_WRITE(sc, WPI_FH_RX_WPTR, ring->cur & ~7); 1104 } 1105 1106 static void 1107 wpi_reset_rx_ring(struct wpi_softc *sc) 1108 { 1109 struct wpi_rx_ring *ring = &sc->rxq; 1110 int ntries; 1111 1112 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1113 1114 if (wpi_nic_lock(sc) == 0) { 1115 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1116 for (ntries = 0; ntries < 1000; ntries++) { 1117 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1118 WPI_FH_RX_STATUS_IDLE) 1119 break; 1120 DELAY(10); 1121 } 1122 wpi_nic_unlock(sc); 1123 } 1124 1125 ring->cur = 0; 1126 ring->update = 0; 1127 } 1128 1129 static void 1130 wpi_free_rx_ring(struct wpi_softc *sc) 1131 { 1132 struct wpi_rx_ring *ring = &sc->rxq; 1133 int i; 1134 1135 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1136 1137 wpi_dma_contig_free(&ring->desc_dma); 1138 1139 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1140 struct wpi_rx_data *data = &ring->data[i]; 1141 1142 if (data->m != NULL) { 1143 bus_dmamap_sync(ring->data_dmat, data->map, 1144 BUS_DMASYNC_POSTREAD); 1145 bus_dmamap_unload(ring->data_dmat, data->map); 1146 m_freem(data->m); 1147 data->m = NULL; 1148 } 1149 if (data->map != NULL) 1150 bus_dmamap_destroy(ring->data_dmat, data->map); 1151 } 1152 if (ring->data_dmat != NULL) { 1153 bus_dma_tag_destroy(ring->data_dmat); 1154 ring->data_dmat = NULL; 1155 } 1156 } 1157 1158 static int 1159 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1160 { 1161 bus_addr_t paddr; 1162 bus_size_t size; 1163 int i, error; 1164 1165 ring->qid = qid; 1166 ring->queued = 0; 1167 ring->cur = 0; 1168 ring->update = 0; 1169 1170 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1171 1172 /* Allocate TX descriptors (16KB aligned.) 
*/ 1173 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1174 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1175 size, WPI_RING_DMA_ALIGN); 1176 if (error != 0) { 1177 device_printf(sc->sc_dev, 1178 "%s: could not allocate TX ring DMA memory, error %d\n", 1179 __func__, error); 1180 goto fail; 1181 } 1182 1183 /* Update shared area with ring physical address. */ 1184 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1185 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1186 BUS_DMASYNC_PREWRITE); 1187 1188 /* 1189 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1190 * to allocate commands space for other rings. 1191 * XXX Do we really need to allocate descriptors for other rings? 1192 */ 1193 if (qid > WPI_CMD_QUEUE_NUM) { 1194 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1195 return 0; 1196 } 1197 1198 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1199 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1200 size, 4); 1201 if (error != 0) { 1202 device_printf(sc->sc_dev, 1203 "%s: could not allocate TX cmd DMA memory, error %d\n", 1204 __func__, error); 1205 goto fail; 1206 } 1207 1208 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1209 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1210 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1211 &ring->data_dmat); 1212 if (error != 0) { 1213 device_printf(sc->sc_dev, 1214 "%s: could not create TX buf DMA tag, error %d\n", 1215 __func__, error); 1216 goto fail; 1217 } 1218 1219 paddr = ring->cmd_dma.paddr; 1220 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1221 struct wpi_tx_data *data = &ring->data[i]; 1222 1223 data->cmd_paddr = paddr; 1224 paddr += sizeof (struct wpi_tx_cmd); 1225 1226 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1227 if (error != 0) { 1228 device_printf(sc->sc_dev, 1229 "%s: could not create TX buf DMA map, error %d\n", 1230 __func__, error); 1231 goto fail; 1232 } 1233 } 1234 1235 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1236 1237 return 0; 1238 1239 fail: wpi_free_tx_ring(sc, ring); 1240 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1241 return error; 1242 } 1243 1244 static void 1245 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1246 { 1247 if (ring->update != 0) { 1248 /* Wait for INT_WAKEUP event. */ 1249 return; 1250 } 1251 1252 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) { 1253 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1254 __func__, ring->qid); 1255 1256 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1257 ring->update = 1; 1258 } else 1259 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1260 } 1261 1262 static void 1263 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1264 { 1265 int i; 1266 1267 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1268 1269 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1270 struct wpi_tx_data *data = &ring->data[i]; 1271 1272 if (data->m != NULL) { 1273 bus_dmamap_sync(ring->data_dmat, data->map, 1274 BUS_DMASYNC_POSTWRITE); 1275 bus_dmamap_unload(ring->data_dmat, data->map); 1276 m_freem(data->m); 1277 data->m = NULL; 1278 } 1279 } 1280 /* Clear TX descriptors. 
*/ 1281 memset(ring->desc, 0, ring->desc_dma.size); 1282 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1283 BUS_DMASYNC_PREWRITE); 1284 sc->qfullmsk &= ~(1 << ring->qid); 1285 ring->queued = 0; 1286 ring->cur = 0; 1287 ring->update = 0; 1288 } 1289 1290 static void 1291 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1292 { 1293 int i; 1294 1295 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1296 1297 wpi_dma_contig_free(&ring->desc_dma); 1298 wpi_dma_contig_free(&ring->cmd_dma); 1299 1300 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1301 struct wpi_tx_data *data = &ring->data[i]; 1302 1303 if (data->m != NULL) { 1304 bus_dmamap_sync(ring->data_dmat, data->map, 1305 BUS_DMASYNC_POSTWRITE); 1306 bus_dmamap_unload(ring->data_dmat, data->map); 1307 m_freem(data->m); 1308 } 1309 if (data->map != NULL) 1310 bus_dmamap_destroy(ring->data_dmat, data->map); 1311 } 1312 if (ring->data_dmat != NULL) { 1313 bus_dma_tag_destroy(ring->data_dmat); 1314 ring->data_dmat = NULL; 1315 } 1316 } 1317 1318 /* 1319 * Extract various information from EEPROM. 1320 */ 1321 static int 1322 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1323 { 1324 #define WPI_CHK(res) do { \ 1325 if ((error = res) != 0) \ 1326 goto fail; \ 1327 } while (0) 1328 int error, i; 1329 1330 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1331 1332 /* Adapter has to be powered on for EEPROM access to work. */ 1333 if ((error = wpi_apm_init(sc)) != 0) { 1334 device_printf(sc->sc_dev, 1335 "%s: could not power ON adapter, error %d\n", __func__, 1336 error); 1337 return error; 1338 } 1339 1340 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1341 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1342 error = EIO; 1343 goto fail; 1344 } 1345 /* Clear HW ownership of EEPROM. */ 1346 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1347 1348 /* Read the hardware capabilities, revision and SKU type. */ 1349 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1350 sizeof(sc->cap))); 1351 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1352 sizeof(sc->rev))); 1353 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1354 sizeof(sc->type))); 1355 1356 sc->rev = le16toh(sc->rev); 1357 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1358 sc->rev, sc->type); 1359 1360 /* Read the regulatory domain (4 ASCII characters.) */ 1361 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1362 sizeof(sc->domain))); 1363 1364 /* Read MAC address. */ 1365 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1366 IEEE80211_ADDR_LEN)); 1367 1368 /* Read the list of authorized channels. */ 1369 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1370 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1371 1372 /* Read the list of TX power groups. */ 1373 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1374 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1375 1376 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1377 1378 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1379 __func__); 1380 1381 return error; 1382 #undef WPI_CHK 1383 } 1384 1385 /* 1386 * Translate EEPROM flags to net80211. 
1387 */ 1388 static uint32_t 1389 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1390 { 1391 uint32_t nflags; 1392 1393 nflags = 0; 1394 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1395 nflags |= IEEE80211_CHAN_PASSIVE; 1396 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1397 nflags |= IEEE80211_CHAN_NOADHOC; 1398 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1399 nflags |= IEEE80211_CHAN_DFS; 1400 /* XXX apparently IBSS may still be marked */ 1401 nflags |= IEEE80211_CHAN_NOADHOC; 1402 } 1403 1404 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1405 if (nflags & IEEE80211_CHAN_NOADHOC) 1406 nflags |= IEEE80211_CHAN_NOHOSTAP; 1407 1408 return nflags; 1409 } 1410 1411 static void 1412 wpi_read_eeprom_band(struct wpi_softc *sc, int n) 1413 { 1414 struct ifnet *ifp = sc->sc_ifp; 1415 struct ieee80211com *ic = ifp->if_l2com; 1416 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1417 const struct wpi_chan_band *band = &wpi_bands[n]; 1418 struct ieee80211_channel *c; 1419 uint8_t chan; 1420 int i, nflags; 1421 1422 for (i = 0; i < band->nchan; i++) { 1423 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1424 DPRINTF(sc, WPI_DEBUG_EEPROM, 1425 "Channel Not Valid: %d, band %d\n", 1426 band->chan[i],n); 1427 continue; 1428 } 1429 1430 chan = band->chan[i]; 1431 nflags = wpi_eeprom_channel_flags(&channels[i]); 1432 1433 c = &ic->ic_channels[ic->ic_nchans++]; 1434 c->ic_ieee = chan; 1435 c->ic_maxregpower = channels[i].maxpwr; 1436 c->ic_maxpower = 2*c->ic_maxregpower; 1437 1438 if (n == 0) { /* 2GHz band */ 1439 c->ic_freq = ieee80211_ieee2mhz(chan, 1440 IEEE80211_CHAN_G); 1441 1442 /* G =>'s B is supported */ 1443 c->ic_flags = IEEE80211_CHAN_B | nflags; 1444 c = &ic->ic_channels[ic->ic_nchans++]; 1445 c[0] = c[-1]; 1446 c->ic_flags = IEEE80211_CHAN_G | nflags; 1447 } else { /* 5GHz band */ 1448 c->ic_freq = ieee80211_ieee2mhz(chan, 1449 IEEE80211_CHAN_A); 1450 1451 c->ic_flags = IEEE80211_CHAN_A | nflags; 1452 } 1453 1454 /* Save maximum allowed TX power for this channel. */ 1455 sc->maxpwr[chan] = channels[i].maxpwr; 1456 1457 DPRINTF(sc, WPI_DEBUG_EEPROM, 1458 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1459 " offset %d\n", chan, c->ic_freq, 1460 channels[i].flags, sc->maxpwr[chan], 1461 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1462 } 1463 } 1464 1465 /** 1466 * Read the eeprom to find out what channels are valid for the given 1467 * band and update net80211 with what we find. 
1468 */ 1469 static int 1470 wpi_read_eeprom_channels(struct wpi_softc *sc, int n) 1471 { 1472 struct ifnet *ifp = sc->sc_ifp; 1473 struct ieee80211com *ic = ifp->if_l2com; 1474 const struct wpi_chan_band *band = &wpi_bands[n]; 1475 int error; 1476 1477 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1478 1479 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1480 band->nchan * sizeof (struct wpi_eeprom_chan)); 1481 if (error != 0) { 1482 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1483 return error; 1484 } 1485 1486 wpi_read_eeprom_band(sc, n); 1487 1488 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1489 1490 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1491 1492 return 0; 1493 } 1494 1495 static struct wpi_eeprom_chan * 1496 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1497 { 1498 int i, j; 1499 1500 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1501 for (i = 0; i < wpi_bands[j].nchan; i++) 1502 if (wpi_bands[j].chan[i] == c->ic_ieee) 1503 return &sc->eeprom_channels[j][i]; 1504 1505 return NULL; 1506 } 1507 1508 /* 1509 * Enforce flags read from EEPROM. 1510 */ 1511 static int 1512 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1513 int nchan, struct ieee80211_channel chans[]) 1514 { 1515 struct ifnet *ifp = ic->ic_ifp; 1516 struct wpi_softc *sc = ifp->if_softc; 1517 int i; 1518 1519 for (i = 0; i < nchan; i++) { 1520 struct ieee80211_channel *c = &chans[i]; 1521 struct wpi_eeprom_chan *channel; 1522 1523 channel = wpi_find_eeprom_channel(sc, c); 1524 if (channel == NULL) { 1525 if_printf(ic->ic_ifp, 1526 "%s: invalid channel %u freq %u/0x%x\n", 1527 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1528 return EINVAL; 1529 } 1530 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1531 } 1532 1533 return 0; 1534 } 1535 1536 static int 1537 wpi_read_eeprom_group(struct wpi_softc *sc, int n) 1538 { 1539 struct wpi_power_group *group = &sc->groups[n]; 1540 struct wpi_eeprom_group rgroup; 1541 int i, error; 1542 1543 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1544 1545 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1546 &rgroup, sizeof rgroup)) != 0) { 1547 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1548 return error; 1549 } 1550 1551 /* Save TX power group information. */ 1552 group->chan = rgroup.chan; 1553 group->maxpwr = rgroup.maxpwr; 1554 /* Retrieve temperature at which the samples were taken. 
*/ 1555 group->temp = (int16_t)le16toh(rgroup.temp); 1556 1557 DPRINTF(sc, WPI_DEBUG_EEPROM, 1558 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1559 group->maxpwr, group->temp); 1560 1561 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1562 group->samples[i].index = rgroup.samples[i].index; 1563 group->samples[i].power = rgroup.samples[i].power; 1564 1565 DPRINTF(sc, WPI_DEBUG_EEPROM, 1566 "\tsample %d: index=%d power=%d\n", i, 1567 group->samples[i].index, group->samples[i].power); 1568 } 1569 1570 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1571 1572 return 0; 1573 } 1574 1575 static int 1576 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1577 { 1578 int newid = WPI_ID_IBSS_MIN; 1579 1580 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1581 if ((sc->nodesmsk & (1 << newid)) == 0) { 1582 sc->nodesmsk |= 1 << newid; 1583 return newid; 1584 } 1585 } 1586 1587 return WPI_ID_UNDEFINED; 1588 } 1589 1590 static __inline int 1591 wpi_add_node_entry_sta(struct wpi_softc *sc) 1592 { 1593 sc->nodesmsk |= 1 << WPI_ID_BSS; 1594 1595 return WPI_ID_BSS; 1596 } 1597 1598 static __inline int 1599 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1600 { 1601 if (id == WPI_ID_UNDEFINED) 1602 return 0; 1603 1604 return (sc->nodesmsk >> id) & 1; 1605 } 1606 1607 static __inline void 1608 wpi_clear_node_table(struct wpi_softc *sc) 1609 { 1610 sc->nodesmsk = 0; 1611 } 1612 1613 static __inline void 1614 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1615 { 1616 sc->nodesmsk &= ~(1 << id); 1617 } 1618 1619 static struct ieee80211_node * 1620 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1621 { 1622 struct wpi_node *wn; 1623 1624 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1625 M_NOWAIT | M_ZERO); 1626 1627 if (wn == NULL) 1628 return NULL; 1629 1630 wn->id = WPI_ID_UNDEFINED; 1631 1632 return &wn->ni; 1633 } 1634 1635 static void 1636 wpi_node_free(struct ieee80211_node *ni) 1637 { 1638 struct ieee80211com *ic = ni->ni_ic; 1639 struct wpi_softc *sc = ic->ic_ifp->if_softc; 1640 struct wpi_node *wn = WPI_NODE(ni); 1641 1642 if (wn->id != WPI_ID_UNDEFINED) { 1643 WPI_NT_LOCK(sc); 1644 if (wpi_check_node_entry(sc, wn->id)) { 1645 wpi_del_node_entry(sc, wn->id); 1646 wpi_del_node(sc, ni); 1647 } 1648 WPI_NT_UNLOCK(sc); 1649 } 1650 1651 sc->sc_node_free(ni); 1652 } 1653 1654 /** 1655 * Called by net80211 when ever there is a change to 80211 state machine 1656 */ 1657 static int 1658 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1659 { 1660 struct wpi_vap *wvp = WPI_VAP(vap); 1661 struct ieee80211com *ic = vap->iv_ic; 1662 struct ifnet *ifp = ic->ic_ifp; 1663 struct wpi_softc *sc = ifp->if_softc; 1664 int error = 0; 1665 1666 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1667 1668 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1669 ieee80211_state_name[vap->iv_state], 1670 ieee80211_state_name[nstate]); 1671 1672 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { 1673 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1674 device_printf(sc->sc_dev, 1675 "%s: could not set power saving level\n", 1676 __func__); 1677 return error; 1678 } 1679 } 1680 1681 switch (nstate) { 1682 case IEEE80211_S_SCAN: 1683 WPI_RXON_LOCK(sc); 1684 if ((sc->rxon.filter & htole32(WPI_FILTER_BSS)) && 1685 vap->iv_opmode != IEEE80211_M_STA) { 1686 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1687 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1688 device_printf(sc->sc_dev, 1689 "%s: could not send RXON\n", 
__func__); 1690 } 1691 } 1692 WPI_RXON_UNLOCK(sc); 1693 break; 1694 1695 case IEEE80211_S_ASSOC: 1696 if (vap->iv_state != IEEE80211_S_RUN) 1697 break; 1698 /* FALLTHROUGH */ 1699 case IEEE80211_S_AUTH: 1700 /* 1701 * The node must be registered in the firmware before auth. 1702 * Also the associd must be cleared on RUN -> ASSOC 1703 * transitions. 1704 */ 1705 if ((error = wpi_auth(sc, vap)) != 0) { 1706 device_printf(sc->sc_dev, 1707 "%s: could not move to AUTH state, error %d\n", 1708 __func__, error); 1709 } 1710 break; 1711 1712 case IEEE80211_S_RUN: 1713 /* 1714 * RUN -> RUN transition; Just restart the timers. 1715 */ 1716 if (vap->iv_state == IEEE80211_S_RUN) { 1717 WPI_RXON_LOCK(sc); 1718 wpi_calib_timeout(sc); 1719 WPI_RXON_UNLOCK(sc); 1720 break; 1721 } 1722 1723 /* 1724 * !RUN -> RUN requires setting the association id 1725 * which is done with a firmware cmd. We also defer 1726 * starting the timers until that work is done. 1727 */ 1728 if ((error = wpi_run(sc, vap)) != 0) { 1729 device_printf(sc->sc_dev, 1730 "%s: could not move to RUN state\n", __func__); 1731 } 1732 break; 1733 1734 default: 1735 break; 1736 } 1737 if (error != 0) { 1738 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1739 return error; 1740 } 1741 1742 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1743 1744 return wvp->wv_newstate(vap, nstate, arg); 1745 } 1746 1747 static void 1748 wpi_calib_timeout(void *arg) 1749 { 1750 struct wpi_softc *sc = arg; 1751 1752 if (!(sc->rxon.filter & htole32(WPI_FILTER_BSS))) 1753 return; 1754 1755 wpi_power_calibration(sc); 1756 1757 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1758 } 1759 1760 static __inline uint8_t 1761 rate2plcp(const uint8_t rate) 1762 { 1763 switch (rate) { 1764 case 12: return 0xd; 1765 case 18: return 0xf; 1766 case 24: return 0x5; 1767 case 36: return 0x7; 1768 case 48: return 0x9; 1769 case 72: return 0xb; 1770 case 96: return 0x1; 1771 case 108: return 0x3; 1772 case 2: return 10; 1773 case 4: return 20; 1774 case 11: return 55; 1775 case 22: return 110; 1776 default: return 0; 1777 } 1778 } 1779 1780 static __inline uint8_t 1781 plcp2rate(const uint8_t plcp) 1782 { 1783 switch (plcp) { 1784 case 0xd: return 12; 1785 case 0xf: return 18; 1786 case 0x5: return 24; 1787 case 0x7: return 36; 1788 case 0x9: return 48; 1789 case 0xb: return 72; 1790 case 0x1: return 96; 1791 case 0x3: return 108; 1792 case 10: return 2; 1793 case 20: return 4; 1794 case 55: return 11; 1795 case 110: return 22; 1796 default: return 0; 1797 } 1798 } 1799 1800 /* Quickly determine if a given rate is CCK or OFDM. 
*/ 1801 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1802 1803 static void 1804 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1805 struct wpi_rx_data *data) 1806 { 1807 struct ifnet *ifp = sc->sc_ifp; 1808 struct ieee80211com *ic = ifp->if_l2com; 1809 struct wpi_rx_ring *ring = &sc->rxq; 1810 struct wpi_rx_stat *stat; 1811 struct wpi_rx_head *head; 1812 struct wpi_rx_tail *tail; 1813 struct ieee80211_frame *wh; 1814 struct ieee80211_node *ni; 1815 struct mbuf *m, *m1; 1816 bus_addr_t paddr; 1817 uint32_t flags; 1818 uint16_t len; 1819 int error; 1820 1821 stat = (struct wpi_rx_stat *)(desc + 1); 1822 1823 if (stat->len > WPI_STAT_MAXLEN) { 1824 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1825 goto fail1; 1826 } 1827 1828 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1829 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1830 len = le16toh(head->len); 1831 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1832 flags = le32toh(tail->flags); 1833 1834 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1835 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1836 le32toh(desc->len), len, (int8_t)stat->rssi, 1837 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1838 1839 /* Discard frames with a bad FCS early. */ 1840 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1841 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1842 __func__, flags); 1843 goto fail1; 1844 } 1845 /* Discard frames that are too short. */ 1846 if (len < sizeof (*wh)) { 1847 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1848 __func__, len); 1849 goto fail1; 1850 } 1851 1852 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1853 if (m1 == NULL) { 1854 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1855 __func__); 1856 goto fail1; 1857 } 1858 bus_dmamap_unload(ring->data_dmat, data->map); 1859 1860 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1861 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1862 if (error != 0 && error != EFBIG) { 1863 device_printf(sc->sc_dev, 1864 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1865 m_freem(m1); 1866 1867 /* Try to reload the old mbuf. */ 1868 error = bus_dmamap_load(ring->data_dmat, data->map, 1869 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1870 &paddr, BUS_DMA_NOWAIT); 1871 if (error != 0 && error != EFBIG) { 1872 panic("%s: could not load old RX mbuf", __func__); 1873 } 1874 /* Physical address may have changed. */ 1875 ring->desc[ring->cur] = htole32(paddr); 1876 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1877 BUS_DMASYNC_PREWRITE); 1878 goto fail1; 1879 } 1880 1881 m = data->m; 1882 data->m = m1; 1883 /* Update RX descriptor. */ 1884 ring->desc[ring->cur] = htole32(paddr); 1885 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1886 BUS_DMASYNC_PREWRITE); 1887 1888 /* Finalize mbuf. */ 1889 m->m_pkthdr.rcvif = ifp; 1890 m->m_data = (caddr_t)(head + 1); 1891 m->m_pkthdr.len = m->m_len = len; 1892 1893 /* Grab a reference to the source node. */ 1894 wh = mtod(m, struct ieee80211_frame *); 1895 1896 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 1897 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 1898 /* Check whether decryption was successful or not. 
*/ 1899 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 1900 DPRINTF(sc, WPI_DEBUG_RECV, 1901 "CCMP decryption failed 0x%x\n", flags); 1902 goto fail2; 1903 } 1904 m->m_flags |= M_WEP; 1905 } 1906 1907 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 1908 1909 if (ieee80211_radiotap_active(ic)) { 1910 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 1911 1912 tap->wr_flags = 0; 1913 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 1914 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 1915 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 1916 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 1917 tap->wr_tsft = tail->tstamp; 1918 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 1919 tap->wr_rate = plcp2rate(head->plcp); 1920 } 1921 1922 WPI_UNLOCK(sc); 1923 1924 /* Send the frame to the 802.11 layer. */ 1925 if (ni != NULL) { 1926 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 1927 /* Node is no longer needed. */ 1928 ieee80211_free_node(ni); 1929 } else 1930 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 1931 1932 WPI_LOCK(sc); 1933 1934 return; 1935 1936 fail2: m_freem(m); 1937 1938 fail1: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1939 } 1940 1941 static void 1942 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1943 struct wpi_rx_data *data) 1944 { 1945 /* Ignore */ 1946 } 1947 1948 static void 1949 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 1950 { 1951 struct ifnet *ifp = sc->sc_ifp; 1952 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 1953 struct wpi_tx_data *data = &ring->data[desc->idx]; 1954 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 1955 struct mbuf *m; 1956 struct ieee80211_node *ni; 1957 struct ieee80211vap *vap; 1958 struct ieee80211com *ic; 1959 uint32_t status = le32toh(stat->status); 1960 int ackfailcnt = stat->ackfailcnt / 2; /* wpi_mrr_setup() */ 1961 1962 KASSERT(data->ni != NULL, ("no node")); 1963 KASSERT(data->m != NULL, ("no mbuf")); 1964 1965 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1966 1967 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 1968 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 1969 "status %x\n", __func__, desc->qid, desc->idx, ackfailcnt, 1970 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 1971 1972 /* Unmap and free mbuf. */ 1973 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 1974 bus_dmamap_unload(ring->data_dmat, data->map); 1975 m = data->m, data->m = NULL; 1976 ni = data->ni, data->ni = NULL; 1977 vap = ni->ni_vap; 1978 ic = vap->iv_ic; 1979 1980 /* 1981 * Update rate control statistics for the node. 
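* The low byte of the TX status is 1 when the frame was sent successfully;
* the (halved) ACK failure count is passed to net80211's rate control module
* either way so it can adapt the TX rate.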
1982 */ 1983 if ((status & 0xff) != 1) { 1984 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1985 ieee80211_ratectl_tx_complete(vap, ni, 1986 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 1987 } else { 1988 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1989 ieee80211_ratectl_tx_complete(vap, ni, 1990 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 1991 } 1992 1993 ieee80211_tx_complete(ni, m, (status & 0xff) != 1); 1994 1995 WPI_TXQ_STATE_LOCK(sc); 1996 ring->queued -= 1; 1997 if (ring->queued > 0) { 1998 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 1999 2000 if (sc->qfullmsk != 0 && 2001 ring->queued < WPI_TX_RING_LOMARK) { 2002 sc->qfullmsk &= ~(1 << ring->qid); 2003 IF_LOCK(&ifp->if_snd); 2004 if (sc->qfullmsk == 0 && 2005 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2006 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2007 IF_UNLOCK(&ifp->if_snd); 2008 ieee80211_runtask(ic, &sc->sc_start_task); 2009 } else 2010 IF_UNLOCK(&ifp->if_snd); 2011 } 2012 } else 2013 callout_stop(&sc->tx_timeout); 2014 WPI_TXQ_STATE_UNLOCK(sc); 2015 2016 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2017 } 2018 2019 /* 2020 * Process a "command done" firmware notification. This is where we wakeup 2021 * processes waiting for a synchronous command completion. 2022 */ 2023 static void 2024 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2025 { 2026 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2027 struct wpi_tx_data *data; 2028 2029 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2030 "type %s len %d\n", desc->qid, desc->idx, 2031 desc->flags, wpi_cmd_str(desc->type), 2032 le32toh(desc->len)); 2033 2034 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2035 return; /* Not a command ack. */ 2036 2037 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2038 2039 data = &ring->data[desc->idx]; 2040 2041 /* If the command was mapped in an mbuf, free it. */ 2042 if (data->m != NULL) { 2043 bus_dmamap_sync(ring->data_dmat, data->map, 2044 BUS_DMASYNC_POSTWRITE); 2045 bus_dmamap_unload(ring->data_dmat, data->map); 2046 m_freem(data->m); 2047 data->m = NULL; 2048 } 2049 2050 wakeup(&ring->cmd[desc->idx]); 2051 } 2052 2053 static void 2054 wpi_notif_intr(struct wpi_softc *sc) 2055 { 2056 struct ifnet *ifp = sc->sc_ifp; 2057 struct ieee80211com *ic = ifp->if_l2com; 2058 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2059 uint32_t hw; 2060 2061 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2062 BUS_DMASYNC_POSTREAD); 2063 2064 hw = le32toh(sc->shared->next); 2065 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2066 2067 while (sc->rxq.cur != hw) { 2068 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2069 2070 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2071 struct wpi_rx_desc *desc; 2072 2073 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2074 BUS_DMASYNC_POSTREAD); 2075 desc = mtod(data->m, struct wpi_rx_desc *); 2076 2077 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2078 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2079 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2080 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2081 2082 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2083 /* Reply to a command. */ 2084 wpi_cmd_done(sc, desc); 2085 } 2086 2087 switch (desc->type) { 2088 case WPI_RX_DONE: 2089 /* An 802.11 frame has been received. */ 2090 wpi_rx_done(sc, desc, data); 2091 2092 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2093 /* wpi_stop() was called. 
*/ 2094 return; 2095 } 2096 2097 break; 2098 2099 case WPI_TX_DONE: 2100 /* An 802.11 frame has been transmitted. */ 2101 wpi_tx_done(sc, desc); 2102 break; 2103 2104 case WPI_RX_STATISTICS: 2105 case WPI_BEACON_STATISTICS: 2106 wpi_rx_statistics(sc, desc, data); 2107 break; 2108 2109 case WPI_BEACON_MISSED: 2110 { 2111 struct wpi_beacon_missed *miss = 2112 (struct wpi_beacon_missed *)(desc + 1); 2113 uint32_t misses; 2114 2115 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2116 BUS_DMASYNC_POSTREAD); 2117 misses = le32toh(miss->consecutive); 2118 2119 DPRINTF(sc, WPI_DEBUG_STATE, 2120 "%s: beacons missed %d/%d\n", __func__, misses, 2121 le32toh(miss->total)); 2122 2123 if (vap->iv_state == IEEE80211_S_RUN && 2124 (ic->ic_flags & IEEE80211_F_SCAN) == 0 && 2125 misses >= vap->iv_bmissthreshold) 2126 ieee80211_beacon_miss(ic); 2127 2128 break; 2129 } 2130 case WPI_UC_READY: 2131 { 2132 struct wpi_ucode_info *uc = 2133 (struct wpi_ucode_info *)(desc + 1); 2134 2135 /* The microcontroller is ready. */ 2136 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2137 BUS_DMASYNC_POSTREAD); 2138 DPRINTF(sc, WPI_DEBUG_RESET, 2139 "microcode alive notification version=%d.%d " 2140 "subtype=%x alive=%x\n", uc->major, uc->minor, 2141 uc->subtype, le32toh(uc->valid)); 2142 2143 if (le32toh(uc->valid) != 1) { 2144 device_printf(sc->sc_dev, 2145 "microcontroller initialization failed\n"); 2146 wpi_stop_locked(sc); 2147 } 2148 /* Save the address of the error log in SRAM. */ 2149 sc->errptr = le32toh(uc->errptr); 2150 break; 2151 } 2152 case WPI_STATE_CHANGED: 2153 { 2154 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2155 BUS_DMASYNC_POSTREAD); 2156 2157 uint32_t *status = (uint32_t *)(desc + 1); 2158 2159 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2160 le32toh(*status)); 2161 2162 if (le32toh(*status) & 1) { 2163 WPI_NT_LOCK(sc); 2164 wpi_clear_node_table(sc); 2165 WPI_NT_UNLOCK(sc); 2166 ieee80211_runtask(ic, &sc->sc_radiooff_task); 2167 return; 2168 } 2169 break; 2170 } 2171 case WPI_START_SCAN: 2172 { 2173 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2174 BUS_DMASYNC_POSTREAD); 2175 #ifdef WPI_DEBUG 2176 struct wpi_start_scan *scan = 2177 (struct wpi_start_scan *)(desc + 1); 2178 DPRINTF(sc, WPI_DEBUG_SCAN, 2179 "%s: scanning channel %d status %x\n", 2180 __func__, scan->chan, le32toh(scan->status)); 2181 #endif 2182 break; 2183 } 2184 case WPI_STOP_SCAN: 2185 { 2186 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2187 BUS_DMASYNC_POSTREAD); 2188 #ifdef WPI_DEBUG 2189 struct wpi_stop_scan *scan = 2190 (struct wpi_stop_scan *)(desc + 1); 2191 DPRINTF(sc, WPI_DEBUG_SCAN, 2192 "scan finished nchan=%d status=%d chan=%d\n", 2193 scan->nchan, scan->status, scan->chan); 2194 #endif 2195 WPI_RXON_LOCK(sc); 2196 callout_stop(&sc->scan_timeout); 2197 WPI_RXON_UNLOCK(sc); 2198 ieee80211_scan_next(vap); 2199 break; 2200 } 2201 } 2202 2203 if (sc->rxq.cur % 8 == 0) { 2204 /* Tell the firmware what we have processed. */ 2205 wpi_update_rx_ring(sc); 2206 } 2207 } 2208 } 2209 2210 /* 2211 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2212 * from power-down sleep mode. 2213 */ 2214 static void 2215 wpi_wakeup_intr(struct wpi_softc *sc) 2216 { 2217 int qid; 2218 2219 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2220 "%s: ucode wakeup from power-down sleep\n", __func__); 2221 2222 /* Wakeup RX and TX rings. 
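* Ring index updates that could not be posted while the microcontroller was
* asleep are remembered in the ->update flags; flush them now that it is
* awake, then drop the MAC access request.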
*/ 2223 if (sc->rxq.update) { 2224 sc->rxq.update = 0; 2225 wpi_update_rx_ring(sc); 2226 } 2227 WPI_TXQ_LOCK(sc); 2228 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2229 struct wpi_tx_ring *ring = &sc->txq[qid]; 2230 2231 if (ring->update) { 2232 ring->update = 0; 2233 wpi_update_tx_ring(sc, ring); 2234 } 2235 } 2236 WPI_TXQ_UNLOCK(sc); 2237 2238 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2239 } 2240 2241 /* 2242 * This function prints firmware registers 2243 */ 2244 #ifdef WPI_DEBUG 2245 static void 2246 wpi_debug_registers(struct wpi_softc *sc) 2247 { 2248 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 2249 int i; 2250 static const uint32_t csr_tbl[] = { 2251 WPI_HW_IF_CONFIG, 2252 WPI_INT, 2253 WPI_INT_MASK, 2254 WPI_FH_INT, 2255 WPI_GPIO_IN, 2256 WPI_RESET, 2257 WPI_GP_CNTRL, 2258 WPI_EEPROM, 2259 WPI_EEPROM_GP, 2260 WPI_GIO, 2261 WPI_UCODE_GP1, 2262 WPI_UCODE_GP2, 2263 WPI_GIO_CHICKEN, 2264 WPI_ANA_PLL, 2265 WPI_DBG_HPET_MEM, 2266 }; 2267 static const uint32_t prph_tbl[] = { 2268 WPI_APMG_CLK_CTRL, 2269 WPI_APMG_PS, 2270 WPI_APMG_PCI_STT, 2271 WPI_APMG_RFKILL, 2272 }; 2273 2274 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2275 2276 for (i = 0; i < COUNTOF(csr_tbl); i++) { 2277 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2278 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2279 2280 if ((i + 1) % 2 == 0) 2281 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2282 } 2283 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2284 2285 if (wpi_nic_lock(sc) == 0) { 2286 for (i = 0; i < COUNTOF(prph_tbl); i++) { 2287 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2288 wpi_get_prph_string(prph_tbl[i]), 2289 wpi_prph_read(sc, prph_tbl[i])); 2290 2291 if ((i + 1) % 2 == 0) 2292 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2293 } 2294 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2295 wpi_nic_unlock(sc); 2296 } else { 2297 DPRINTF(sc, WPI_DEBUG_REGISTER, 2298 "Cannot access internal registers.\n"); 2299 } 2300 #undef COUNTOF 2301 } 2302 #endif 2303 2304 /* 2305 * Dump the error log of the firmware when a firmware panic occurs. Although 2306 * we can't debug the firmware because it is neither open source nor free, it 2307 * can help us to identify certain classes of problems. 2308 */ 2309 static void 2310 wpi_fatal_intr(struct wpi_softc *sc) 2311 { 2312 struct wpi_fw_dump dump; 2313 uint32_t i, offset, count; 2314 const uint32_t size_errmsg = 2315 (sizeof (wpi_fw_errmsg) / sizeof ((wpi_fw_errmsg)[0])); 2316 2317 /* Check that the error log address is valid. */ 2318 if (sc->errptr < WPI_FW_DATA_BASE || 2319 sc->errptr + sizeof (dump) > 2320 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2321 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2322 sc->errptr); 2323 return; 2324 } 2325 if (wpi_nic_lock(sc) != 0) { 2326 printf("%s: could not read firmware error log\n", __func__); 2327 return; 2328 } 2329 /* Read number of entries in the log. */ 2330 count = wpi_mem_read(sc, sc->errptr); 2331 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2332 printf("%s: invalid count field (count = %u)\n", __func__, 2333 count); 2334 wpi_nic_unlock(sc); 2335 return; 2336 } 2337 /* Skip "count" field. */ 2338 offset = sc->errptr + sizeof (uint32_t); 2339 printf("firmware error log (count = %u):\n", count); 2340 for (i = 0; i < count; i++) { 2341 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2342 sizeof (dump) / sizeof (uint32_t)); 2343 2344 printf(" error type = \"%s\" (0x%08X)\n", 2345 (dump.desc < size_errmsg) ? 
2346 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2347 dump.desc); 2348 printf(" error data = 0x%08X\n", 2349 dump.data); 2350 printf(" branch link = 0x%08X%08X\n", 2351 dump.blink[0], dump.blink[1]); 2352 printf(" interrupt link = 0x%08X%08X\n", 2353 dump.ilink[0], dump.ilink[1]); 2354 printf(" time = %u\n", dump.time); 2355 2356 offset += sizeof (dump); 2357 } 2358 wpi_nic_unlock(sc); 2359 /* Dump driver status (TX and RX rings) while we're here. */ 2360 printf("driver status:\n"); 2361 WPI_TXQ_LOCK(sc); 2362 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2363 struct wpi_tx_ring *ring = &sc->txq[i]; 2364 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2365 i, ring->qid, ring->cur, ring->queued); 2366 } 2367 WPI_TXQ_UNLOCK(sc); 2368 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2369 } 2370 2371 static void 2372 wpi_intr(void *arg) 2373 { 2374 struct wpi_softc *sc = arg; 2375 struct ifnet *ifp = sc->sc_ifp; 2376 uint32_t r1, r2; 2377 2378 WPI_LOCK(sc); 2379 2380 /* Disable interrupts. */ 2381 WPI_WRITE(sc, WPI_INT_MASK, 0); 2382 2383 r1 = WPI_READ(sc, WPI_INT); 2384 2385 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2386 goto end; /* Hardware gone! */ 2387 2388 r2 = WPI_READ(sc, WPI_FH_INT); 2389 2390 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2391 r1, r2); 2392 2393 if (r1 == 0 && r2 == 0) 2394 goto done; /* Interrupt not for us. */ 2395 2396 /* Acknowledge interrupts. */ 2397 WPI_WRITE(sc, WPI_INT, r1); 2398 WPI_WRITE(sc, WPI_FH_INT, r2); 2399 2400 if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) { 2401 device_printf(sc->sc_dev, "fatal firmware error\n"); 2402 #ifdef WPI_DEBUG 2403 wpi_debug_registers(sc); 2404 #endif 2405 wpi_fatal_intr(sc); 2406 DPRINTF(sc, WPI_DEBUG_HW, 2407 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2408 "(Hardware Error)"); 2409 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2410 goto end; 2411 } 2412 2413 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2414 (r2 & WPI_FH_INT_RX)) 2415 wpi_notif_intr(sc); 2416 2417 if (r1 & WPI_INT_ALIVE) 2418 wakeup(sc); /* Firmware is alive. */ 2419 2420 if (r1 & WPI_INT_WAKEUP) 2421 wpi_wakeup_intr(sc); 2422 2423 done: 2424 /* Re-enable interrupts. */ 2425 if (ifp->if_flags & IFF_UP) 2426 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2427 2428 end: WPI_UNLOCK(sc); 2429 } 2430 2431 static int 2432 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2433 { 2434 struct ifnet *ifp = sc->sc_ifp; 2435 struct ieee80211_frame *wh; 2436 struct wpi_tx_cmd *cmd; 2437 struct wpi_tx_data *data; 2438 struct wpi_tx_desc *desc; 2439 struct wpi_tx_ring *ring; 2440 struct mbuf *m1; 2441 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2442 int error, i, hdrlen, nsegs, totlen, pad; 2443 2444 WPI_TXQ_LOCK(sc); 2445 2446 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2447 2448 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2449 2450 if (sc->txq_active == 0) { 2451 /* wpi_stop() was called */ 2452 error = ENETDOWN; 2453 goto fail; 2454 } 2455 2456 wh = mtod(buf->m, struct ieee80211_frame *); 2457 hdrlen = ieee80211_anyhdrsize(wh); 2458 totlen = buf->m->m_pkthdr.len; 2459 2460 if (hdrlen & 3) { 2461 /* First segment length must be a multiple of 4. */ 2462 pad = 4 - (hdrlen & 3); 2463 } else 2464 pad = 0; 2465 2466 ring = &sc->txq[buf->ac]; 2467 desc = &ring->desc[ring->cur]; 2468 data = &ring->data[ring->cur]; 2469 2470 /* Prepare TX firmware command. 
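* The command cell receives the per-frame parameters (buf->data) followed by
* a copy of the 802.11 header; the header is then trimmed from the mbuf so
* that DMA segment 0 (the command itself) carries it and the remaining
* segments carry only the payload.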
*/ 2471 cmd = &ring->cmd[ring->cur]; 2472 cmd->code = buf->code; 2473 cmd->flags = 0; 2474 cmd->qid = ring->qid; 2475 cmd->idx = ring->cur; 2476 2477 memcpy(cmd->data, buf->data, buf->size); 2478 2479 /* Save and trim IEEE802.11 header. */ 2480 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2481 m_adj(buf->m, hdrlen); 2482 2483 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2484 segs, &nsegs, BUS_DMA_NOWAIT); 2485 if (error != 0 && error != EFBIG) { 2486 device_printf(sc->sc_dev, 2487 "%s: can't map mbuf (error %d)\n", __func__, error); 2488 goto fail; 2489 } 2490 if (error != 0) { 2491 /* Too many DMA segments, linearize mbuf. */ 2492 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2493 if (m1 == NULL) { 2494 device_printf(sc->sc_dev, 2495 "%s: could not defrag mbuf\n", __func__); 2496 error = ENOBUFS; 2497 goto fail; 2498 } 2499 buf->m = m1; 2500 2501 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2502 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2503 if (error != 0) { 2504 device_printf(sc->sc_dev, 2505 "%s: can't map mbuf (error %d)\n", __func__, 2506 error); 2507 goto fail; 2508 } 2509 } 2510 2511 KASSERT(nsegs < WPI_MAX_SCATTER, 2512 ("too many DMA segments, nsegs (%d) should be less than %d", 2513 nsegs, WPI_MAX_SCATTER)); 2514 2515 data->m = buf->m; 2516 data->ni = buf->ni; 2517 2518 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2519 __func__, ring->qid, ring->cur, totlen, nsegs); 2520 2521 /* Fill TX descriptor. */ 2522 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2523 /* First DMA segment is used by the TX command. */ 2524 desc->segs[0].addr = htole32(data->cmd_paddr); 2525 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2526 /* Other DMA segments are for data payload. */ 2527 seg = &segs[0]; 2528 for (i = 1; i <= nsegs; i++) { 2529 desc->segs[i].addr = htole32(seg->ds_addr); 2530 desc->segs[i].len = htole32(seg->ds_len); 2531 seg++; 2532 } 2533 2534 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2535 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2536 BUS_DMASYNC_PREWRITE); 2537 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2538 BUS_DMASYNC_PREWRITE); 2539 2540 /* Kick TX ring. */ 2541 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2542 wpi_update_tx_ring(sc, ring); 2543 2544 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2545 /* Mark TX ring as full if we reach a certain threshold. */ 2546 WPI_TXQ_STATE_LOCK(sc); 2547 if (++ring->queued > WPI_TX_RING_HIMARK) { 2548 sc->qfullmsk |= 1 << ring->qid; 2549 2550 IF_LOCK(&ifp->if_snd); 2551 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2552 IF_UNLOCK(&ifp->if_snd); 2553 } 2554 2555 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2556 WPI_TXQ_STATE_UNLOCK(sc); 2557 } 2558 2559 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2560 2561 WPI_TXQ_UNLOCK(sc); 2562 2563 return 0; 2564 2565 fail: m_freem(buf->m); 2566 2567 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2568 2569 WPI_TXQ_UNLOCK(sc); 2570 2571 return error; 2572 } 2573 2574 /* 2575 * Construct the data packet for a transmit buffer. 
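* This selects the EDCA access category and TX rate (management, multicast,
* fixed or rate-control driven), applies encryption and RTS/CTS protection
* as needed, fills in a wpi_cmd_data structure and hands it to wpi_cmd2().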
2576 */ 2577 static int 2578 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2579 { 2580 const struct ieee80211_txparam *tp; 2581 struct ieee80211vap *vap = ni->ni_vap; 2582 struct ieee80211com *ic = ni->ni_ic; 2583 struct wpi_node *wn = WPI_NODE(ni); 2584 struct ieee80211_channel *chan; 2585 struct ieee80211_frame *wh; 2586 struct ieee80211_key *k = NULL; 2587 struct wpi_buf tx_data; 2588 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2589 uint32_t flags; 2590 uint16_t qos; 2591 uint8_t tid, type; 2592 int ac, error, swcrypt, rate, ismcast, totlen; 2593 2594 wh = mtod(m, struct ieee80211_frame *); 2595 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2596 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2597 2598 /* Select EDCA Access Category and TX ring for this frame. */ 2599 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2600 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2601 tid = qos & IEEE80211_QOS_TID; 2602 } else { 2603 qos = 0; 2604 tid = 0; 2605 } 2606 ac = M_WME_GETAC(m); 2607 2608 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2609 ni->ni_chan : ic->ic_curchan; 2610 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2611 2612 /* Choose a TX rate index. */ 2613 if (type == IEEE80211_FC0_TYPE_MGT) 2614 rate = tp->mgmtrate; 2615 else if (ismcast) 2616 rate = tp->mcastrate; 2617 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2618 rate = tp->ucastrate; 2619 else if (m->m_flags & M_EAPOL) 2620 rate = tp->mgmtrate; 2621 else { 2622 /* XXX pass pktlen */ 2623 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2624 rate = ni->ni_txrate; 2625 } 2626 2627 /* Encrypt the frame if need be. */ 2628 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2629 /* Retrieve key for TX. */ 2630 k = ieee80211_crypto_encap(ni, m); 2631 if (k == NULL) { 2632 error = ENOBUFS; 2633 goto fail; 2634 } 2635 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2636 2637 /* 802.11 header may have moved. */ 2638 wh = mtod(m, struct ieee80211_frame *); 2639 } 2640 totlen = m->m_pkthdr.len; 2641 2642 if (ieee80211_radiotap_active_vap(vap)) { 2643 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2644 2645 tap->wt_flags = 0; 2646 tap->wt_rate = rate; 2647 if (k != NULL) 2648 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2649 2650 ieee80211_radiotap_tx(vap, m); 2651 } 2652 2653 flags = 0; 2654 if (!ismcast) { 2655 /* Unicast frame, check if an ACK is expected. */ 2656 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2657 IEEE80211_QOS_ACKPOLICY_NOACK) 2658 flags |= WPI_TX_NEED_ACK; 2659 } 2660 2661 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2662 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2663 2664 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2665 if (!ismcast) { 2666 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2667 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2668 flags |= WPI_TX_NEED_RTS; 2669 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2670 WPI_RATE_IS_OFDM(rate)) { 2671 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2672 flags |= WPI_TX_NEED_CTS; 2673 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2674 flags |= WPI_TX_NEED_RTS; 2675 } 2676 2677 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2678 flags |= WPI_TX_FULL_TXOP; 2679 } 2680 2681 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2682 if (type == IEEE80211_FC0_TYPE_MGT) { 2683 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2684 2685 /* Tell HW to set timestamp in probe responses. 
*/ 2686 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2687 flags |= WPI_TX_INSERT_TSTAMP; 2688 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2689 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2690 tx->timeout = htole16(3); 2691 else 2692 tx->timeout = htole16(2); 2693 } 2694 2695 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2696 tx->id = WPI_ID_BROADCAST; 2697 else { 2698 if (wn->id == WPI_ID_UNDEFINED) { 2699 device_printf(sc->sc_dev, 2700 "%s: undefined node id\n", __func__); 2701 error = EINVAL; 2702 goto fail; 2703 } 2704 2705 tx->id = wn->id; 2706 } 2707 2708 if (type != IEEE80211_FC0_TYPE_MGT) 2709 tx->data_ntries = tp->maxretry; 2710 2711 if (k != NULL && !swcrypt) { 2712 switch (k->wk_cipher->ic_cipher) { 2713 case IEEE80211_CIPHER_AES_CCM: 2714 tx->security = WPI_CIPHER_CCMP; 2715 break; 2716 2717 default: 2718 break; 2719 } 2720 2721 memcpy(tx->key, k->wk_key, k->wk_keylen); 2722 } 2723 2724 tx->len = htole16(totlen); 2725 tx->flags = htole32(flags); 2726 tx->plcp = rate2plcp(rate); 2727 tx->tid = tid; 2728 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2729 tx->ofdm_mask = 0xff; 2730 tx->cck_mask = 0x0f; 2731 tx->rts_ntries = 7; 2732 2733 tx_data.ni = ni; 2734 tx_data.m = m; 2735 tx_data.size = sizeof(struct wpi_cmd_data); 2736 tx_data.code = WPI_CMD_TX_DATA; 2737 tx_data.ac = ac; 2738 2739 return wpi_cmd2(sc, &tx_data); 2740 2741 fail: m_freem(m); 2742 return error; 2743 } 2744 2745 static int 2746 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2747 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2748 { 2749 struct ieee80211vap *vap = ni->ni_vap; 2750 struct ieee80211_key *k = NULL; 2751 struct ieee80211_frame *wh; 2752 struct wpi_buf tx_data; 2753 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2754 uint32_t flags; 2755 uint8_t type; 2756 int ac, rate, swcrypt, totlen; 2757 2758 wh = mtod(m, struct ieee80211_frame *); 2759 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2760 2761 ac = params->ibp_pri & 3; 2762 2763 /* Choose a TX rate index. */ 2764 rate = params->ibp_rate0; 2765 2766 flags = 0; 2767 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2768 flags |= WPI_TX_NEED_ACK; 2769 if (params->ibp_flags & IEEE80211_BPF_RTS) 2770 flags |= WPI_TX_NEED_RTS; 2771 if (params->ibp_flags & IEEE80211_BPF_CTS) 2772 flags |= WPI_TX_NEED_CTS; 2773 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2774 flags |= WPI_TX_FULL_TXOP; 2775 2776 /* Encrypt the frame if need be. */ 2777 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2778 /* Retrieve key for TX. */ 2779 k = ieee80211_crypto_encap(ni, m); 2780 if (k == NULL) { 2781 m_freem(m); 2782 return ENOBUFS; 2783 } 2784 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2785 2786 /* 802.11 header may have moved. */ 2787 wh = mtod(m, struct ieee80211_frame *); 2788 } 2789 totlen = m->m_pkthdr.len; 2790 2791 if (ieee80211_radiotap_active_vap(vap)) { 2792 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2793 2794 tap->wt_flags = 0; 2795 tap->wt_rate = rate; 2796 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2797 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2798 2799 ieee80211_radiotap_tx(vap, m); 2800 } 2801 2802 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2803 if (type == IEEE80211_FC0_TYPE_MGT) { 2804 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2805 2806 /* Tell HW to set timestamp in probe responses. 
*/ 2807 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2808 flags |= WPI_TX_INSERT_TSTAMP; 2809 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2810 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2811 tx->timeout = htole16(3); 2812 else 2813 tx->timeout = htole16(2); 2814 } 2815 2816 if (k != NULL && !swcrypt) { 2817 switch (k->wk_cipher->ic_cipher) { 2818 case IEEE80211_CIPHER_AES_CCM: 2819 tx->security = WPI_CIPHER_CCMP; 2820 break; 2821 2822 default: 2823 break; 2824 } 2825 2826 memcpy(tx->key, k->wk_key, k->wk_keylen); 2827 } 2828 2829 tx->len = htole16(totlen); 2830 tx->flags = htole32(flags); 2831 tx->plcp = rate2plcp(rate); 2832 tx->id = WPI_ID_BROADCAST; 2833 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2834 tx->rts_ntries = params->ibp_try1; 2835 tx->data_ntries = params->ibp_try0; 2836 2837 tx_data.ni = ni; 2838 tx_data.m = m; 2839 tx_data.size = sizeof(struct wpi_cmd_data); 2840 tx_data.code = WPI_CMD_TX_DATA; 2841 tx_data.ac = ac; 2842 2843 return wpi_cmd2(sc, &tx_data); 2844 } 2845 2846 static int 2847 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2848 const struct ieee80211_bpf_params *params) 2849 { 2850 struct ieee80211com *ic = ni->ni_ic; 2851 struct ifnet *ifp = ic->ic_ifp; 2852 struct wpi_softc *sc = ifp->if_softc; 2853 int error = 0; 2854 2855 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2856 2857 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2858 ieee80211_free_node(ni); 2859 m_freem(m); 2860 return ENETDOWN; 2861 } 2862 2863 WPI_TX_LOCK(sc); 2864 if (params == NULL) { 2865 /* 2866 * Legacy path; interpret frame contents to decide 2867 * precisely how to send the frame. 2868 */ 2869 error = wpi_tx_data(sc, m, ni); 2870 } else { 2871 /* 2872 * Caller supplied explicit parameters to use in 2873 * sending the frame. 
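* The BPF parameters carry the rate (ibp_rate0), the retry counts
* (ibp_try0/ibp_try1) and the RTS/CTS and no-ACK flags verbatim; see
* wpi_tx_data_raw() below.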
2874 */ 2875 error = wpi_tx_data_raw(sc, m, ni, params); 2876 } 2877 WPI_TX_UNLOCK(sc); 2878 2879 if (error != 0) { 2880 /* NB: m is reclaimed on tx failure */ 2881 ieee80211_free_node(ni); 2882 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2883 2884 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2885 2886 return error; 2887 } 2888 2889 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2890 2891 return 0; 2892 } 2893 2894 /** 2895 * Process data waiting to be sent on the IFNET output queue 2896 */ 2897 static void 2898 wpi_start(struct ifnet *ifp) 2899 { 2900 struct wpi_softc *sc = ifp->if_softc; 2901 struct ieee80211_node *ni; 2902 struct mbuf *m; 2903 2904 WPI_TX_LOCK(sc); 2905 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 2906 2907 for (;;) { 2908 IF_LOCK(&ifp->if_snd); 2909 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2910 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2911 IF_UNLOCK(&ifp->if_snd); 2912 break; 2913 } 2914 IF_UNLOCK(&ifp->if_snd); 2915 2916 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 2917 if (m == NULL) 2918 break; 2919 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 2920 if (wpi_tx_data(sc, m, ni) != 0) { 2921 ieee80211_free_node(ni); 2922 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2923 } 2924 } 2925 2926 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 2927 WPI_TX_UNLOCK(sc); 2928 } 2929 2930 static void 2931 wpi_start_task(void *arg0, int pending) 2932 { 2933 struct wpi_softc *sc = arg0; 2934 struct ifnet *ifp = sc->sc_ifp; 2935 2936 wpi_start(ifp); 2937 } 2938 2939 static void 2940 wpi_watchdog_rfkill(void *arg) 2941 { 2942 struct wpi_softc *sc = arg; 2943 struct ifnet *ifp = sc->sc_ifp; 2944 struct ieee80211com *ic = ifp->if_l2com; 2945 2946 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 2947 2948 /* No need to lock firmware memory. */ 2949 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 2950 /* Radio kill switch is still off. */ 2951 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 2952 sc); 2953 } else 2954 ieee80211_runtask(ic, &sc->sc_radioon_task); 2955 } 2956 2957 static void 2958 wpi_scan_timeout(void *arg) 2959 { 2960 struct wpi_softc *sc = arg; 2961 struct ifnet *ifp = sc->sc_ifp; 2962 2963 if_printf(ifp, "scan timeout\n"); 2964 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2965 } 2966 2967 static void 2968 wpi_tx_timeout(void *arg) 2969 { 2970 struct wpi_softc *sc = arg; 2971 struct ifnet *ifp = sc->sc_ifp; 2972 2973 if_printf(ifp, "device timeout\n"); 2974 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2975 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2976 } 2977 2978 static int 2979 wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2980 { 2981 struct wpi_softc *sc = ifp->if_softc; 2982 struct ieee80211com *ic = ifp->if_l2com; 2983 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2984 struct ifreq *ifr = (struct ifreq *) data; 2985 int error = 0; 2986 2987 switch (cmd) { 2988 case SIOCGIFADDR: 2989 error = ether_ioctl(ifp, cmd, data); 2990 break; 2991 case SIOCSIFFLAGS: 2992 if (ifp->if_flags & IFF_UP) { 2993 wpi_init(sc); 2994 2995 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 && 2996 vap != NULL) 2997 ieee80211_stop(vap); 2998 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2999 wpi_stop(sc); 3000 break; 3001 case SIOCGIFMEDIA: 3002 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3003 break; 3004 default: 3005 error = EINVAL; 3006 break; 3007 } 3008 return error; 3009 } 3010 3011 /* 3012 * Send a command to the firmware. 
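* Commands that do not fit in the ring slot's inline buffer are copied into
* a jumbo mbuf and DMA-mapped separately. For synchronous commands (async
* == 0) the caller sleeps on the command slot until wpi_cmd_done() wakes it
* up, or for at most one second.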
3013 */ 3014 static int 3015 wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3016 int async) 3017 { 3018 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3019 struct wpi_tx_desc *desc; 3020 struct wpi_tx_data *data; 3021 struct wpi_tx_cmd *cmd; 3022 struct mbuf *m; 3023 bus_addr_t paddr; 3024 int totlen, error; 3025 3026 WPI_TXQ_LOCK(sc); 3027 3028 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3029 3030 if (sc->txq_active == 0) { 3031 /* wpi_stop() was called */ 3032 error = 0; 3033 goto fail; 3034 } 3035 3036 if (async == 0) 3037 WPI_LOCK_ASSERT(sc); 3038 3039 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3040 __func__, wpi_cmd_str(code), size, async); 3041 3042 desc = &ring->desc[ring->cur]; 3043 data = &ring->data[ring->cur]; 3044 totlen = 4 + size; 3045 3046 if (size > sizeof cmd->data) { 3047 /* Command is too large to fit in a descriptor. */ 3048 if (totlen > MCLBYTES) { 3049 error = EINVAL; 3050 goto fail; 3051 } 3052 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3053 if (m == NULL) { 3054 error = ENOMEM; 3055 goto fail; 3056 } 3057 cmd = mtod(m, struct wpi_tx_cmd *); 3058 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3059 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3060 if (error != 0) { 3061 m_freem(m); 3062 goto fail; 3063 } 3064 data->m = m; 3065 } else { 3066 cmd = &ring->cmd[ring->cur]; 3067 paddr = data->cmd_paddr; 3068 } 3069 3070 cmd->code = code; 3071 cmd->flags = 0; 3072 cmd->qid = ring->qid; 3073 cmd->idx = ring->cur; 3074 memcpy(cmd->data, buf, size); 3075 3076 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3077 desc->segs[0].addr = htole32(paddr); 3078 desc->segs[0].len = htole32(totlen); 3079 3080 if (size > sizeof cmd->data) { 3081 bus_dmamap_sync(ring->data_dmat, data->map, 3082 BUS_DMASYNC_PREWRITE); 3083 } else { 3084 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3085 BUS_DMASYNC_PREWRITE); 3086 } 3087 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3088 BUS_DMASYNC_PREWRITE); 3089 3090 /* Kick command ring. */ 3091 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3092 wpi_update_tx_ring(sc, ring); 3093 3094 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3095 3096 WPI_TXQ_UNLOCK(sc); 3097 3098 if (async) 3099 return 0; 3100 3101 return mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3102 3103 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3104 3105 WPI_TXQ_UNLOCK(sc); 3106 3107 return error; 3108 } 3109 3110 /* 3111 * Configure HW multi-rate retries. 3112 */ 3113 static int 3114 wpi_mrr_setup(struct wpi_softc *sc) 3115 { 3116 struct ifnet *ifp = sc->sc_ifp; 3117 struct ieee80211com *ic = ifp->if_l2com; 3118 struct wpi_mrr_setup mrr; 3119 int i, error; 3120 3121 /* CCK rates (not used with 802.11a). */ 3122 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3123 mrr.rates[i].flags = 0; 3124 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3125 /* Fallback to the immediate lower CCK rate (if any.) */ 3126 mrr.rates[i].next = 3127 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3128 /* Try one time at this rate before falling back to "next". */ 3129 mrr.rates[i].ntries = 1; 3130 } 3131 /* OFDM rates (not used with 802.11b). */ 3132 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3133 mrr.rates[i].flags = 0; 3134 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3135 /* Fallback to the immediate lower rate (if any.) */ 3136 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3137 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 
3138 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3139 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3140 i - 1; 3141 /* Try one time at this rate before falling back to "next". */ 3142 mrr.rates[i].ntries = 1; 3143 } 3144 /* Setup MRR for control frames. */ 3145 mrr.which = htole32(WPI_MRR_CTL); 3146 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3147 if (error != 0) { 3148 device_printf(sc->sc_dev, 3149 "could not setup MRR for control frames\n"); 3150 return error; 3151 } 3152 /* Setup MRR for data frames. */ 3153 mrr.which = htole32(WPI_MRR_DATA); 3154 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3155 if (error != 0) { 3156 device_printf(sc->sc_dev, 3157 "could not setup MRR for data frames\n"); 3158 return error; 3159 } 3160 return 0; 3161 } 3162 3163 static int 3164 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3165 { 3166 struct ieee80211com *ic = ni->ni_ic; 3167 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3168 struct wpi_node *wn = WPI_NODE(ni); 3169 struct wpi_node_info node; 3170 int error; 3171 3172 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3173 3174 if (wn->id == WPI_ID_UNDEFINED) 3175 return EINVAL; 3176 3177 memset(&node, 0, sizeof node); 3178 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3179 node.id = wn->id; 3180 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3181 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3182 node.action = htole32(WPI_ACTION_SET_RATE); 3183 node.antenna = WPI_ANTENNA_BOTH; 3184 3185 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3186 wn->id, ether_sprintf(ni->ni_macaddr)); 3187 3188 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3189 if (error != 0) { 3190 device_printf(sc->sc_dev, 3191 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3192 error); 3193 return error; 3194 } 3195 3196 if (wvp->wv_gtk != 0) { 3197 error = wpi_set_global_keys(ni); 3198 if (error != 0) { 3199 device_printf(sc->sc_dev, 3200 "%s: error while setting global keys\n", __func__); 3201 return ENXIO; 3202 } 3203 } 3204 3205 return 0; 3206 } 3207 3208 /* 3209 * Broadcast node is used to send group-addressed and management frames. 3210 */ 3211 static int 3212 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3213 { 3214 struct ifnet *ifp = sc->sc_ifp; 3215 struct ieee80211com *ic = ifp->if_l2com; 3216 struct wpi_node_info node; 3217 3218 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3219 3220 memset(&node, 0, sizeof node); 3221 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3222 node.id = WPI_ID_BROADCAST; 3223 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3224 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3225 node.action = htole32(WPI_ACTION_SET_RATE); 3226 node.antenna = WPI_ANTENNA_BOTH; 3227 3228 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3229 3230 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3231 } 3232 3233 static int 3234 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3235 { 3236 struct wpi_node *wn = WPI_NODE(ni); 3237 int error; 3238 3239 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3240 3241 wn->id = wpi_add_node_entry_sta(sc); 3242 3243 if ((error = wpi_add_node(sc, ni)) != 0) { 3244 wpi_del_node_entry(sc, wn->id); 3245 wn->id = WPI_ID_UNDEFINED; 3246 return error; 3247 } 3248 3249 return 0; 3250 } 3251 3252 static int 3253 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3254 { 3255 struct wpi_node *wn = WPI_NODE(ni); 3256 int error; 3257 3258 KASSERT(wn->id == WPI_ID_UNDEFINED, 3259 ("the node %d was added before", wn->id)); 3260 3261 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3262 3263 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3264 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3265 return ENOMEM; 3266 } 3267 3268 if ((error = wpi_add_node(sc, ni)) != 0) { 3269 wpi_del_node_entry(sc, wn->id); 3270 wn->id = WPI_ID_UNDEFINED; 3271 return error; 3272 } 3273 3274 return 0; 3275 } 3276 3277 static void 3278 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3279 { 3280 struct wpi_node *wn = WPI_NODE(ni); 3281 struct wpi_cmd_del_node node; 3282 int error; 3283 3284 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3285 3286 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3287 3288 memset(&node, 0, sizeof node); 3289 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3290 node.count = 1; 3291 3292 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3293 wn->id, ether_sprintf(ni->ni_macaddr)); 3294 3295 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3296 if (error != 0) { 3297 device_printf(sc->sc_dev, 3298 "%s: could not delete node %u, error %d\n", __func__, 3299 wn->id, error); 3300 } 3301 } 3302 3303 static int 3304 wpi_updateedca(struct ieee80211com *ic) 3305 { 3306 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3307 struct wpi_softc *sc = ic->ic_ifp->if_softc; 3308 struct wpi_edca_params cmd; 3309 int aci, error; 3310 3311 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3312 3313 memset(&cmd, 0, sizeof cmd); 3314 cmd.flags = htole32(WPI_EDCA_UPDATE); 3315 for (aci = 0; aci < WME_NUM_AC; aci++) { 3316 const struct wmeParams *ac = 3317 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3318 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3319 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3320 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3321 cmd.ac[aci].txoplimit = 3322 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3323 3324 DPRINTF(sc, WPI_DEBUG_EDCA, 3325 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3326 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3327 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3328 cmd.ac[aci].txoplimit); 3329 } 3330 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3331 3332 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3333 3334 return error; 3335 #undef WPI_EXP2 3336 } 3337 3338 static void 3339 wpi_set_promisc(struct wpi_softc *sc) 3340 { 3341 struct ifnet *ifp = sc->sc_ifp; 3342 struct ieee80211com *ic = 
ifp->if_l2com; 3343 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3344 uint32_t promisc_filter; 3345 3346 promisc_filter = WPI_FILTER_CTL; 3347 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3348 promisc_filter |= WPI_FILTER_PROMISC; 3349 3350 if (ifp->if_flags & IFF_PROMISC) 3351 sc->rxon.filter |= htole32(promisc_filter); 3352 else 3353 sc->rxon.filter &= ~htole32(promisc_filter); 3354 } 3355 3356 static void 3357 wpi_update_promisc(struct ifnet *ifp) 3358 { 3359 struct wpi_softc *sc = ifp->if_softc; 3360 3361 WPI_RXON_LOCK(sc); 3362 wpi_set_promisc(sc); 3363 3364 if (wpi_send_rxon(sc, 1, 1) != 0) { 3365 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3366 __func__); 3367 } 3368 WPI_RXON_UNLOCK(sc); 3369 } 3370 3371 static void 3372 wpi_update_mcast(struct ifnet *ifp) 3373 { 3374 /* Ignore */ 3375 } 3376 3377 static void 3378 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3379 { 3380 struct wpi_cmd_led led; 3381 3382 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3383 3384 led.which = which; 3385 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3386 led.off = off; 3387 led.on = on; 3388 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3389 } 3390 3391 static int 3392 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3393 { 3394 struct wpi_cmd_timing cmd; 3395 uint64_t val, mod; 3396 3397 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3398 3399 memset(&cmd, 0, sizeof cmd); 3400 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3401 cmd.bintval = htole16(ni->ni_intval); 3402 cmd.lintval = htole16(10); 3403 3404 /* Compute remaining time until next beacon. */ 3405 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3406 mod = le64toh(cmd.tstamp) % val; 3407 cmd.binitval = htole32((uint32_t)(val - mod)); 3408 3409 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3410 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3411 3412 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3413 } 3414 3415 /* 3416 * This function is called periodically (every 60 seconds) to adjust output 3417 * power to temperature changes. 3418 */ 3419 static void 3420 wpi_power_calibration(struct wpi_softc *sc) 3421 { 3422 int temp; 3423 3424 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3425 3426 /* Update sensor data. */ 3427 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3428 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3429 3430 /* Sanity-check read value. */ 3431 if (temp < -260 || temp > 25) { 3432 /* This can't be correct, ignore. */ 3433 DPRINTF(sc, WPI_DEBUG_TEMP, 3434 "out-of-range temperature reported: %d\n", temp); 3435 return; 3436 } 3437 3438 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3439 3440 /* Adjust Tx power if need be. */ 3441 if (abs(temp - sc->temp) <= 6) 3442 return; 3443 3444 sc->temp = temp; 3445 3446 if (wpi_set_txpower(sc, 1) != 0) { 3447 /* just warn, too bad for the automatic calibration... */ 3448 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3449 } 3450 } 3451 3452 /* 3453 * Set TX power for current channel. 3454 */ 3455 static int 3456 wpi_set_txpower(struct wpi_softc *sc, int async) 3457 { 3458 struct wpi_power_group *group; 3459 struct wpi_cmd_txpower cmd; 3460 uint8_t chan; 3461 int idx, is_chan_5ghz, i; 3462 3463 /* Retrieve current channel from last RXON. 
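* Each channel belongs to one of the TX power groups calibrated in the
* EEPROM: group 0 covers the 2GHz band and the remaining groups cover
* increasing ranges of 5GHz channels, so the loop below picks the first
* 5GHz group whose boundary channel (group->chan) is not below the current
* channel.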
*/ 3464 chan = sc->rxon.chan; 3465 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3466 3467 /* Find the TX power group to which this channel belongs. */ 3468 if (is_chan_5ghz) { 3469 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3470 if (chan <= group->chan) 3471 break; 3472 } else 3473 group = &sc->groups[0]; 3474 3475 memset(&cmd, 0, sizeof cmd); 3476 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3477 cmd.chan = htole16(chan); 3478 3479 /* Set TX power for all OFDM and CCK rates. */ 3480 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3481 /* Retrieve TX power for this channel/rate. */ 3482 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3483 3484 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3485 3486 if (is_chan_5ghz) { 3487 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3488 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3489 } else { 3490 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3491 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3492 } 3493 DPRINTF(sc, WPI_DEBUG_TEMP, 3494 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3495 } 3496 3497 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3498 } 3499 3500 /* 3501 * Determine Tx power index for a given channel/rate combination. 3502 * This takes into account the regulatory information from EEPROM and the 3503 * current temperature. 3504 */ 3505 static int 3506 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3507 uint8_t chan, int is_chan_5ghz, int ridx) 3508 { 3509 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3510 #define fdivround(a, b, n) \ 3511 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3512 3513 /* Linear interpolation. */ 3514 #define interpolate(x, x1, y1, x2, y2, n) \ 3515 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3516 3517 struct wpi_power_sample *sample; 3518 int pwr, idx; 3519 3520 /* Default TX power is group maximum TX power minus 3dB. */ 3521 pwr = group->maxpwr / 2; 3522 3523 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3524 switch (ridx) { 3525 case WPI_RIDX_OFDM36: 3526 pwr -= is_chan_5ghz ? 5 : 0; 3527 break; 3528 case WPI_RIDX_OFDM48: 3529 pwr -= is_chan_5ghz ? 10 : 7; 3530 break; 3531 case WPI_RIDX_OFDM54: 3532 pwr -= is_chan_5ghz ? 12 : 9; 3533 break; 3534 } 3535 3536 /* Never exceed the channel maximum allowed TX power. */ 3537 pwr = min(pwr, sc->maxpwr[chan]); 3538 3539 /* Retrieve TX power index into gain tables from samples. */ 3540 for (sample = group->samples; sample < &group->samples[3]; sample++) 3541 if (pwr > sample[1].power) 3542 break; 3543 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3544 idx = interpolate(pwr, sample[0].power, sample[0].index, 3545 sample[1].power, sample[1].index, 19); 3546 3547 /*- 3548 * Adjust power index based on current temperature: 3549 * - if cooler than factory-calibrated: decrease output power 3550 * - if warmer than factory-calibrated: increase output power 3551 */ 3552 idx -= (sc->temp - group->temp) * 11 / 100; 3553 3554 /* Decrease TX power for CCK rates (-5dB). */ 3555 if (ridx >= WPI_RIDX_CCK1) 3556 idx += 10; 3557 3558 /* Make sure idx stays in a valid range. */ 3559 if (idx < 0) 3560 return 0; 3561 if (idx > WPI_MAX_PWR_INDEX) 3562 return WPI_MAX_PWR_INDEX; 3563 return idx; 3564 3565 #undef interpolate 3566 #undef fdivround 3567 } 3568 3569 /* 3570 * Set STA mode power saving level (between 0 and 5). 
3571 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3572 */ 3573 static int 3574 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3575 { 3576 struct wpi_pmgt_cmd cmd; 3577 const struct wpi_pmgt *pmgt; 3578 uint32_t max, skip_dtim; 3579 uint32_t reg; 3580 int i; 3581 3582 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3583 "%s: dtim=%d, level=%d, async=%d\n", 3584 __func__, dtim, level, async); 3585 3586 /* Select which PS parameters to use. */ 3587 if (dtim <= 10) 3588 pmgt = &wpi_pmgt[0][level]; 3589 else 3590 pmgt = &wpi_pmgt[1][level]; 3591 3592 memset(&cmd, 0, sizeof cmd); 3593 if (level != 0) /* not CAM */ 3594 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3595 /* Retrieve PCIe Active State Power Management (ASPM). */ 3596 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3597 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3598 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3599 3600 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3601 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3602 3603 if (dtim == 0) { 3604 dtim = 1; 3605 skip_dtim = 0; 3606 } else 3607 skip_dtim = pmgt->skip_dtim; 3608 3609 if (skip_dtim != 0) { 3610 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3611 max = pmgt->intval[4]; 3612 if (max == (uint32_t)-1) 3613 max = dtim * (skip_dtim + 1); 3614 else if (max > dtim) 3615 max = (max / dtim) * dtim; 3616 } else 3617 max = dtim; 3618 3619 for (i = 0; i < 5; i++) 3620 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3621 3622 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3623 } 3624 3625 static int 3626 wpi_send_btcoex(struct wpi_softc *sc) 3627 { 3628 struct wpi_bluetooth cmd; 3629 3630 memset(&cmd, 0, sizeof cmd); 3631 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3632 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3633 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3634 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3635 __func__); 3636 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3637 } 3638 3639 static int 3640 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3641 { 3642 int error; 3643 3644 if (async) 3645 WPI_RXON_LOCK_ASSERT(sc); 3646 3647 if (assoc && (sc->rxon.filter & htole32(WPI_FILTER_BSS))) { 3648 struct wpi_assoc rxon_assoc; 3649 3650 rxon_assoc.flags = sc->rxon.flags; 3651 rxon_assoc.filter = sc->rxon.filter; 3652 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3653 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3654 rxon_assoc.reserved = 0; 3655 3656 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3657 sizeof (struct wpi_assoc), async); 3658 if (error != 0) { 3659 device_printf(sc->sc_dev, 3660 "RXON_ASSOC command failed, error %d\n", error); 3661 return error; 3662 } 3663 } else { 3664 if (async) { 3665 WPI_NT_LOCK(sc); 3666 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3667 sizeof (struct wpi_rxon), async); 3668 if (error == 0) 3669 wpi_clear_node_table(sc); 3670 WPI_NT_UNLOCK(sc); 3671 } else { 3672 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3673 sizeof (struct wpi_rxon), async); 3674 if (error == 0) 3675 wpi_clear_node_table(sc); 3676 } 3677 3678 if (error != 0) { 3679 device_printf(sc->sc_dev, 3680 "RXON command failed, error %d\n", error); 3681 return error; 3682 } 3683 3684 /* Add broadcast node. 
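* A full RXON command invalidates the node table (wpi_clear_node_table()
* above), so the broadcast entry used for management and group-addressed
* frames must be re-installed before anything can be transmitted.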
*/ 3685 error = wpi_add_broadcast_node(sc, async); 3686 if (error != 0) { 3687 device_printf(sc->sc_dev, 3688 "could not add broadcast node, error %d\n", error); 3689 return error; 3690 } 3691 } 3692 3693 /* Configuration has changed, set Tx power accordingly. */ 3694 if ((error = wpi_set_txpower(sc, async)) != 0) { 3695 device_printf(sc->sc_dev, 3696 "%s: could not set TX power, error %d\n", __func__, error); 3697 return error; 3698 } 3699 3700 return 0; 3701 } 3702 3703 /** 3704 * Configure the card to listen to a particular channel, this transitions the 3705 * card into being able to receive frames from remote devices. 3706 */ 3707 static int 3708 wpi_config(struct wpi_softc *sc) 3709 { 3710 struct ifnet *ifp = sc->sc_ifp; 3711 struct ieee80211com *ic = ifp->if_l2com; 3712 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3713 uint32_t flags; 3714 int error; 3715 3716 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3717 3718 /* Set power saving level to CAM during initialization. */ 3719 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3720 device_printf(sc->sc_dev, 3721 "%s: could not set power saving level\n", __func__); 3722 return error; 3723 } 3724 3725 /* Configure bluetooth coexistence. */ 3726 if ((error = wpi_send_btcoex(sc)) != 0) { 3727 device_printf(sc->sc_dev, 3728 "could not configure bluetooth coexistence\n"); 3729 return error; 3730 } 3731 3732 /* Configure adapter. */ 3733 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3734 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3735 3736 /* Set default channel. */ 3737 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 3738 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3739 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 3740 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3741 3742 sc->rxon.filter = WPI_FILTER_MULTICAST; 3743 switch (ic->ic_opmode) { 3744 case IEEE80211_M_STA: 3745 sc->rxon.mode = WPI_MODE_STA; 3746 break; 3747 case IEEE80211_M_IBSS: 3748 sc->rxon.mode = WPI_MODE_IBSS; 3749 sc->rxon.filter |= WPI_FILTER_BEACON; 3750 break; 3751 case IEEE80211_M_HOSTAP: 3752 /* XXX workaround for beaconing */ 3753 sc->rxon.mode = WPI_MODE_IBSS; 3754 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3755 break; 3756 case IEEE80211_M_AHDEMO: 3757 /* XXX workaround for passive channels selection */ 3758 sc->rxon.mode = WPI_MODE_HOSTAP; 3759 break; 3760 case IEEE80211_M_MONITOR: 3761 sc->rxon.mode = WPI_MODE_MONITOR; 3762 break; 3763 default: 3764 device_printf(sc->sc_dev, "unknown opmode %d\n", 3765 ic->ic_opmode); 3766 return EINVAL; 3767 } 3768 sc->rxon.filter = htole32(sc->rxon.filter); 3769 wpi_set_promisc(sc); 3770 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3771 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3772 3773 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3774 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3775 __func__); 3776 return error; 3777 } 3778 3779 /* Set up rate scaling. */ 3780 if ((error = wpi_mrr_setup(sc)) != 0) { 3781 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3782 error); 3783 return error; 3784 } 3785 3786 /* Disable beacon notifications (unused).
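* The firmware would otherwise push a statistics notification for every
* received beacon; wpi_rx_statistics() ignores those anyway, so ask the
* firmware not to send them.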
*/ 3787 flags = WPI_STATISTICS_BEACON_DISABLE; 3788 error = wpi_cmd(sc, WPI_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3789 if (error != 0) { 3790 device_printf(sc->sc_dev, 3791 "could not disable beacon statistics, error %d\n", error); 3792 return error; 3793 } 3794 3795 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3796 3797 return 0; 3798 } 3799 3800 static uint16_t 3801 wpi_get_active_dwell_time(struct wpi_softc *sc, 3802 struct ieee80211_channel *c, uint8_t n_probes) 3803 { 3804 /* No channel? Default to 2GHz settings. */ 3805 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3806 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3807 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3808 } 3809 3810 /* 5GHz dwell time. */ 3811 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 3812 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 3813 } 3814 3815 /* 3816 * Limit the total dwell time to 85% of the beacon interval. 3817 * 3818 * Returns the dwell time in milliseconds. 3819 */ 3820 static uint16_t 3821 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 3822 { 3823 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3824 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3825 int bintval = 0; 3826 3827 /* bintval is in TU (1.024mS) */ 3828 if (vap != NULL) 3829 bintval = vap->iv_bss->ni_intval; 3830 3831 /* 3832 * If it's non-zero, we should calculate the minimum of 3833 * it and the DWELL_BASE. 3834 * 3835 * XXX Yes, the math should take into account that bintval 3836 * is 1.024mS, not 1mS.. 3837 */ 3838 if (bintval > 0) { 3839 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 3840 bintval); 3841 return (MIN(WPI_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); 3842 } 3843 3844 /* No association context? Default. */ 3845 return (WPI_PASSIVE_DWELL_BASE); 3846 } 3847 3848 static uint16_t 3849 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 3850 { 3851 uint16_t passive; 3852 3853 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 3854 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 3855 else 3856 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 3857 3858 /* Clamp to the beacon interval if we're associated. */ 3859 return (wpi_limit_dwell(sc, passive)); 3860 } 3861 3862 /* 3863 * Send a scan request to the firmware. 3864 */ 3865 static int 3866 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 3867 { 3868 struct ifnet *ifp = sc->sc_ifp; 3869 struct ieee80211com *ic = ifp->if_l2com; 3870 struct ieee80211_scan_state *ss = ic->ic_scan; 3871 struct ieee80211vap *vap = ss->ss_vap; 3872 struct wpi_scan_hdr *hdr; 3873 struct wpi_cmd_data *tx; 3874 struct wpi_scan_essid *essids; 3875 struct wpi_scan_chan *chan; 3876 struct ieee80211_frame *wh; 3877 struct ieee80211_rateset *rs; 3878 uint16_t dwell_active, dwell_passive; 3879 uint8_t *buf, *frm; 3880 int buflen, error, i, nssid; 3881 3882 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3883 3884 /* 3885 * We are absolutely not allowed to send a scan command when another 3886 * scan command is pending. 
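* A scan is considered pending while the scan_timeout callout armed at the
* end of this function is still running; it is stopped from the
* WPI_STOP_SCAN notification handler once the firmware reports completion.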
3887 */ 3888 if (callout_pending(&sc->scan_timeout)) { 3889 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 3890 __func__); 3891 3892 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3893 3894 return (EAGAIN); 3895 } 3896 3897 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 3898 if (buf == NULL) { 3899 device_printf(sc->sc_dev, 3900 "%s: could not allocate buffer for scan command\n", 3901 __func__); 3902 error = ENOMEM; 3903 goto fail; 3904 } 3905 hdr = (struct wpi_scan_hdr *)buf; 3906 3907 /* 3908 * Move to the next channel if no packets are received within 10 msecs 3909 * after sending the probe request. 3910 */ 3911 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 3912 hdr->quiet_threshold = htole16(1); /* min # of packets */ 3913 /* 3914 * Max needs to be greater than active and passive and quiet! 3915 * It's also in microseconds! 3916 */ 3917 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 3918 hdr->pause_svc = htole32((4 << 24) | 3919 (100 * IEEE80211_DUR_TU)); /* Hardcode for now */ 3920 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 3921 3922 tx = (struct wpi_cmd_data *)(hdr + 1); 3923 tx->flags = htole32(WPI_TX_AUTO_SEQ); 3924 tx->id = WPI_ID_BROADCAST; 3925 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3926 3927 if (IEEE80211_IS_CHAN_5GHZ(c)) { 3928 /* Send probe requests at 6Mbps. */ 3929 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 3930 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 3931 } else { 3932 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 3933 /* Send probe requests at 1Mbps. */ 3934 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3935 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 3936 } 3937 3938 essids = (struct wpi_scan_essid *)(tx + 1); 3939 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 3940 for (i = 0; i < nssid; i++) { 3941 essids[i].id = IEEE80211_ELEMID_SSID; 3942 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 3943 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 3944 #ifdef WPI_DEBUG 3945 if (sc->sc_debug & WPI_DEBUG_SCAN) { 3946 printf("Scanning Essid: "); 3947 ieee80211_print_essid(essids[i].data, essids[i].len); 3948 printf("\n"); 3949 } 3950 #endif 3951 } 3952 3953 /* 3954 * Build a probe request frame. Most of the following code is a 3955 * copy & paste of what is done in net80211. 3956 */ 3957 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 3958 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 3959 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 3960 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 3961 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 3962 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 3963 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 3964 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ 3965 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ 3966 3967 frm = (uint8_t *)(wh + 1); 3968 frm = ieee80211_add_ssid(frm, NULL, 0); 3969 frm = ieee80211_add_rates(frm, rs); 3970 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 3971 frm = ieee80211_add_xrates(frm, rs); 3972 3973 /* Set length of probe request. */ 3974 tx->len = htole16(frm - (uint8_t *)wh); 3975 3976 /* 3977 * Construct information about the channel that we 3978 * want to scan. 
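 * (At this point the command buffer is laid out as: scan header |
 * TX command (struct wpi_cmd_data) | ESSID list | probe request
 * template | per-channel entries.)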
The firmware expects this to be directly 3979 * after the scan probe request 3980 */ 3981 chan = (struct wpi_scan_chan *)frm; 3982 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 3983 chan->flags = 0; 3984 if (nssid) { 3985 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 3986 chan->flags |= WPI_CHAN_NPBREQS(nssid); 3987 } else 3988 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 3989 3990 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 3991 chan->flags |= WPI_CHAN_ACTIVE; 3992 3993 /* 3994 * Calculate the active/passive dwell times. 3995 */ 3996 3997 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 3998 dwell_passive = wpi_get_passive_dwell_time(sc, c); 3999 4000 /* Make sure they're valid. */ 4001 if (dwell_passive <= dwell_active) 4002 dwell_passive = dwell_active + 1; 4003 4004 chan->active = htole16(dwell_active); 4005 chan->passive = htole16(dwell_passive); 4006 4007 chan->dsp_gain = 0x6e; /* Default level */ 4008 4009 if (IEEE80211_IS_CHAN_5GHZ(c)) 4010 chan->rf_gain = 0x3b; 4011 else 4012 chan->rf_gain = 0x28; 4013 4014 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4015 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4016 4017 hdr->nchan++; 4018 chan++; 4019 4020 buflen = (uint8_t *)chan - buf; 4021 hdr->len = htole16(buflen); 4022 4023 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4024 hdr->nchan); 4025 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4026 free(buf, M_DEVBUF); 4027 4028 if (error != 0) 4029 goto fail; 4030 4031 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4032 4033 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4034 4035 return 0; 4036 4037 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4038 4039 return error; 4040 } 4041 4042 static int 4043 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4044 { 4045 struct ieee80211com *ic = vap->iv_ic; 4046 struct ieee80211_node *ni = vap->iv_bss; 4047 int error; 4048 4049 WPI_RXON_LOCK(sc); 4050 4051 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4052 4053 /* Update adapter configuration. */ 4054 sc->rxon.associd = 0; 4055 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4056 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4057 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4058 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4059 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4060 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4061 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4062 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4063 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4064 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4065 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4066 sc->rxon.cck_mask = 0; 4067 sc->rxon.ofdm_mask = 0x15; 4068 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4069 sc->rxon.cck_mask = 0x03; 4070 sc->rxon.ofdm_mask = 0; 4071 } else { 4072 /* Assume 802.11b/g. 
*/
4073 sc->rxon.cck_mask = 0x0f;
4074 sc->rxon.ofdm_mask = 0x15;
4075 }
4076
4077 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
4078 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
4079 sc->rxon.ofdm_mask);
4080
4081 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
4082 device_printf(sc->sc_dev, "%s: could not send RXON\n",
4083 __func__);
4084 }
4085
4086 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4087
4088 WPI_RXON_UNLOCK(sc);
4089
4090 return error;
4091 }
4092
4093 static int
4094 wpi_config_beacon(struct wpi_vap *wvp)
4095 {
4096 struct ieee80211com *ic = wvp->wv_vap.iv_ic;
4097 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff;
4098 struct wpi_buf *bcn = &wvp->wv_bcbuf;
4099 struct wpi_softc *sc = ic->ic_ifp->if_softc;
4100 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
4101 struct ieee80211_tim_ie *tie;
4102 struct mbuf *m;
4103 uint8_t *ptr;
4104 int error;
4105
4106 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4107
4108 WPI_VAP_LOCK_ASSERT(wvp);
4109
4110 cmd->len = htole16(bcn->m->m_pkthdr.len);
4111 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
4112 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
4113
4114 /* XXX seems to be unused */
4115 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) {
4116 tie = (struct ieee80211_tim_ie *) bo->bo_tim;
4117 ptr = mtod(bcn->m, uint8_t *);
4118
4119 cmd->tim = htole16(bo->bo_tim - ptr);
4120 cmd->timsz = tie->tim_len;
4121 }
4122
4123 /* Necessary for recursion in ieee80211_beacon_update(). */
4124 m = bcn->m;
4125 bcn->m = m_dup(m, M_NOWAIT);
4126 if (bcn->m == NULL) {
4127 device_printf(sc->sc_dev,
4128 "%s: could not copy beacon frame\n", __func__);
4129 error = ENOMEM;
4130 goto end;
4131 }
4132
4133 if ((error = wpi_cmd2(sc, bcn)) != 0) {
4134 device_printf(sc->sc_dev,
4135 "%s: could not update beacon frame, error %d\n", __func__,
4136 error);
4137 }
4138
4139 /* Restore mbuf.
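 * wpi_cmd2() above was handed the m_dup() copy that replaced bcn->m, so
 * put the original beacon mbuf back in place for the next
 * ieee80211_beacon_update() pass.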
*/ 4140 end: bcn->m = m; 4141 4142 return error; 4143 } 4144 4145 static int 4146 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4147 { 4148 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 4149 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4150 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4151 struct mbuf *m; 4152 int error; 4153 4154 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4155 4156 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4157 return EINVAL; 4158 4159 m = ieee80211_beacon_alloc(ni, bo); 4160 if (m == NULL) { 4161 device_printf(sc->sc_dev, 4162 "%s: could not allocate beacon frame\n", __func__); 4163 return ENOMEM; 4164 } 4165 4166 WPI_VAP_LOCK(wvp); 4167 if (bcn->m != NULL) 4168 m_freem(bcn->m); 4169 4170 bcn->m = m; 4171 4172 error = wpi_config_beacon(wvp); 4173 WPI_VAP_UNLOCK(wvp); 4174 4175 return error; 4176 } 4177 4178 static void 4179 wpi_update_beacon(struct ieee80211vap *vap, int item) 4180 { 4181 struct wpi_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4182 struct wpi_vap *wvp = WPI_VAP(vap); 4183 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4184 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4185 struct ieee80211_node *ni = vap->iv_bss; 4186 int mcast = 0; 4187 4188 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4189 4190 WPI_VAP_LOCK(wvp); 4191 if (bcn->m == NULL) { 4192 bcn->m = ieee80211_beacon_alloc(ni, bo); 4193 if (bcn->m == NULL) { 4194 device_printf(sc->sc_dev, 4195 "%s: could not allocate beacon frame\n", __func__); 4196 4197 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4198 __func__); 4199 4200 WPI_VAP_UNLOCK(wvp); 4201 return; 4202 } 4203 } 4204 WPI_VAP_UNLOCK(wvp); 4205 4206 if (item == IEEE80211_BEACON_TIM) 4207 mcast = 1; /* TODO */ 4208 4209 setbit(bo->bo_flags, item); 4210 ieee80211_beacon_update(ni, bo, bcn->m, mcast); 4211 4212 WPI_VAP_LOCK(wvp); 4213 wpi_config_beacon(wvp); 4214 WPI_VAP_UNLOCK(wvp); 4215 4216 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4217 } 4218 4219 static void 4220 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4221 { 4222 struct ieee80211vap *vap = ni->ni_vap; 4223 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4224 struct wpi_node *wn = WPI_NODE(ni); 4225 int error; 4226 4227 WPI_NT_LOCK(sc); 4228 4229 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4230 4231 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4232 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4233 device_printf(sc->sc_dev, 4234 "%s: could not add IBSS node, error %d\n", 4235 __func__, error); 4236 } 4237 } 4238 WPI_NT_UNLOCK(sc); 4239 } 4240 4241 static int 4242 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4243 { 4244 struct ieee80211com *ic = vap->iv_ic; 4245 struct ieee80211_node *ni = vap->iv_bss; 4246 int error; 4247 4248 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4249 4250 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4251 /* Link LED blinks while monitoring. */ 4252 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4253 return 0; 4254 } 4255 4256 /* XXX kernel panic workaround */ 4257 if (ni->ni_chan == IEEE80211_CHAN_ANYC) { 4258 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4259 __func__); 4260 return EINVAL; 4261 } 4262 4263 if ((error = wpi_set_timing(sc, ni)) != 0) { 4264 device_printf(sc->sc_dev, 4265 "%s: could not set timing, error %d\n", __func__, error); 4266 return error; 4267 } 4268 4269 /* Update adapter configuration. 
*/ 4270 WPI_RXON_LOCK(sc); 4271 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4272 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4273 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4274 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4275 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4276 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4277 /* Short preamble and slot time are negotiated when associating. */ 4278 sc->rxon.flags &= ~htole32(WPI_RXON_SHPREAMBLE | WPI_RXON_SHSLOT); 4279 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4280 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4281 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4282 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4283 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4284 sc->rxon.cck_mask = 0; 4285 sc->rxon.ofdm_mask = 0x15; 4286 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4287 sc->rxon.cck_mask = 0x03; 4288 sc->rxon.ofdm_mask = 0; 4289 } else { 4290 /* Assume 802.11b/g. */ 4291 sc->rxon.cck_mask = 0x0f; 4292 sc->rxon.ofdm_mask = 0x15; 4293 } 4294 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4295 4296 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4297 sc->rxon.chan, sc->rxon.flags); 4298 4299 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4300 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4301 __func__); 4302 return error; 4303 } 4304 4305 /* Start periodic calibration timer. */ 4306 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4307 4308 WPI_RXON_UNLOCK(sc); 4309 4310 if (vap->iv_opmode == IEEE80211_M_IBSS || 4311 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4312 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4313 device_printf(sc->sc_dev, 4314 "%s: could not setup beacon, error %d\n", __func__, 4315 error); 4316 return error; 4317 } 4318 } 4319 4320 if (vap->iv_opmode == IEEE80211_M_STA) { 4321 /* Add BSS node. */ 4322 WPI_NT_LOCK(sc); 4323 error = wpi_add_sta_node(sc, ni); 4324 WPI_NT_UNLOCK(sc); 4325 if (error != 0) { 4326 device_printf(sc->sc_dev, 4327 "%s: could not add BSS node, error %d\n", __func__, 4328 error); 4329 return error; 4330 } 4331 } 4332 4333 /* Link LED always on while associated. */ 4334 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4335 4336 /* Enable power-saving mode if requested by user. 
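 * wpi_set_pslevel(sc, 0, 3, 1) below requests a deeper power-save level
 * than the always-on CAM setting (level 0) used in wpi_config(); the
 * final argument appears to submit the command asynchronously.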
*/ 4337 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4338 vap->iv_opmode != IEEE80211_M_IBSS) 4339 (void)wpi_set_pslevel(sc, 0, 3, 1); 4340 4341 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4342 4343 return 0; 4344 } 4345 4346 static int 4347 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4348 { 4349 const struct ieee80211_cipher *cip = k->wk_cipher; 4350 struct ieee80211vap *vap = ni->ni_vap; 4351 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4352 struct wpi_node *wn = WPI_NODE(ni); 4353 struct wpi_node_info node; 4354 uint16_t kflags; 4355 int error; 4356 4357 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4358 4359 if (wpi_check_node_entry(sc, wn->id) == 0) { 4360 device_printf(sc->sc_dev, "%s: node does not exist\n", 4361 __func__); 4362 return 0; 4363 } 4364 4365 switch (cip->ic_cipher) { 4366 case IEEE80211_CIPHER_AES_CCM: 4367 kflags = WPI_KFLAG_CCMP; 4368 break; 4369 4370 default: 4371 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4372 cip->ic_cipher); 4373 return 0; 4374 } 4375 4376 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4377 if (k->wk_flags & IEEE80211_KEY_GROUP) 4378 kflags |= WPI_KFLAG_MULTICAST; 4379 4380 memset(&node, 0, sizeof node); 4381 node.id = wn->id; 4382 node.control = WPI_NODE_UPDATE; 4383 node.flags = WPI_FLAG_KEY_SET; 4384 node.kflags = htole16(kflags); 4385 memcpy(node.key, k->wk_key, k->wk_keylen); 4386 again: 4387 DPRINTF(sc, WPI_DEBUG_KEY, 4388 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4389 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4390 node.id, ether_sprintf(ni->ni_macaddr)); 4391 4392 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4393 if (error != 0) { 4394 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4395 error); 4396 return !error; 4397 } 4398 4399 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4400 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4401 kflags |= WPI_KFLAG_MULTICAST; 4402 node.kflags = htole16(kflags); 4403 4404 goto again; 4405 } 4406 4407 return 1; 4408 } 4409 4410 static void 4411 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4412 { 4413 const struct ieee80211_key *k = arg; 4414 struct ieee80211vap *vap = ni->ni_vap; 4415 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4416 struct wpi_node *wn = WPI_NODE(ni); 4417 int error; 4418 4419 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4420 return; 4421 4422 WPI_NT_LOCK(sc); 4423 error = wpi_load_key(ni, k); 4424 WPI_NT_UNLOCK(sc); 4425 4426 if (error == 0) { 4427 device_printf(sc->sc_dev, "%s: error while setting key\n", 4428 __func__); 4429 } 4430 } 4431 4432 static int 4433 wpi_set_global_keys(struct ieee80211_node *ni) 4434 { 4435 struct ieee80211vap *vap = ni->ni_vap; 4436 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4437 int error = 1; 4438 4439 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4440 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4441 error = wpi_load_key(ni, wk); 4442 4443 return !error; 4444 } 4445 4446 static int 4447 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4448 { 4449 struct ieee80211vap *vap = ni->ni_vap; 4450 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4451 struct wpi_node *wn = WPI_NODE(ni); 4452 struct wpi_node_info node; 4453 uint16_t kflags; 4454 int error; 4455 4456 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4457 4458 if (wpi_check_node_entry(sc, wn->id) == 0) { 4459 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 
4460 return 1; /* Nothing to do. */ 4461 } 4462 4463 kflags = WPI_KFLAG_KID(k->wk_keyix); 4464 if (k->wk_flags & IEEE80211_KEY_GROUP) 4465 kflags |= WPI_KFLAG_MULTICAST; 4466 4467 memset(&node, 0, sizeof node); 4468 node.id = wn->id; 4469 node.control = WPI_NODE_UPDATE; 4470 node.flags = WPI_FLAG_KEY_SET; 4471 node.kflags = htole16(kflags); 4472 again: 4473 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4474 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4475 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4476 4477 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4478 if (error != 0) { 4479 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4480 error); 4481 return !error; 4482 } 4483 4484 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4485 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4486 kflags |= WPI_KFLAG_MULTICAST; 4487 node.kflags = htole16(kflags); 4488 4489 goto again; 4490 } 4491 4492 return 1; 4493 } 4494 4495 static void 4496 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4497 { 4498 const struct ieee80211_key *k = arg; 4499 struct ieee80211vap *vap = ni->ni_vap; 4500 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4501 struct wpi_node *wn = WPI_NODE(ni); 4502 int error; 4503 4504 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4505 return; 4506 4507 WPI_NT_LOCK(sc); 4508 error = wpi_del_key(ni, k); 4509 WPI_NT_UNLOCK(sc); 4510 4511 if (error == 0) { 4512 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4513 __func__); 4514 } 4515 } 4516 4517 static int 4518 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4519 int set) 4520 { 4521 struct ieee80211com *ic = vap->iv_ic; 4522 struct wpi_softc *sc = ic->ic_ifp->if_softc; 4523 struct wpi_vap *wvp = WPI_VAP(vap); 4524 struct ieee80211_node *ni; 4525 int error, ni_ref = 0; 4526 4527 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4528 4529 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4530 /* Not for us. */ 4531 return 1; 4532 } 4533 4534 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4535 /* XMIT keys are handled in wpi_tx_data(). */ 4536 return 1; 4537 } 4538 4539 /* Handle group keys. */ 4540 if (&vap->iv_nw_keys[0] <= k && 4541 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4542 WPI_NT_LOCK(sc); 4543 if (set) 4544 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4545 else 4546 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4547 WPI_NT_UNLOCK(sc); 4548 4549 if (vap->iv_state == IEEE80211_S_RUN) { 4550 ieee80211_iterate_nodes(&ic->ic_sta, 4551 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4552 __DECONST(void *, k)); 4553 } 4554 4555 return 1; 4556 } 4557 4558 switch (vap->iv_opmode) { 4559 case IEEE80211_M_STA: 4560 ni = vap->iv_bss; 4561 break; 4562 4563 case IEEE80211_M_IBSS: 4564 case IEEE80211_M_AHDEMO: 4565 case IEEE80211_M_HOSTAP: 4566 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4567 if (ni == NULL) 4568 return 0; /* should not happen */ 4569 4570 ni_ref = 1; 4571 break; 4572 4573 default: 4574 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4575 vap->iv_opmode); 4576 return 0; 4577 } 4578 4579 WPI_NT_LOCK(sc); 4580 if (set) 4581 error = wpi_load_key(ni, k); 4582 else 4583 error = wpi_del_key(ni, k); 4584 WPI_NT_UNLOCK(sc); 4585 4586 if (ni_ref) 4587 ieee80211_node_decref(ni); 4588 4589 return error; 4590 } 4591 4592 static int 4593 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, 4594 const uint8_t mac[IEEE80211_ADDR_LEN]) 4595 { 4596 return wpi_process_key(vap, k, 1); 4597 } 4598 4599 static int 4600 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4601 { 4602 return wpi_process_key(vap, k, 0); 4603 } 4604 4605 /* 4606 * This function is called after the runtime firmware notifies us of its 4607 * readiness (called in a process context). 4608 */ 4609 static int 4610 wpi_post_alive(struct wpi_softc *sc) 4611 { 4612 int ntries, error; 4613 4614 /* Check (again) that the radio is not disabled. */ 4615 if ((error = wpi_nic_lock(sc)) != 0) 4616 return error; 4617 4618 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4619 4620 /* NB: Runtime firmware must be up and running. */ 4621 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4622 device_printf(sc->sc_dev, 4623 "RF switch: radio disabled (%s)\n", __func__); 4624 wpi_nic_unlock(sc); 4625 return EPERM; /* :-) */ 4626 } 4627 wpi_nic_unlock(sc); 4628 4629 /* Wait for thermal sensor to calibrate. */ 4630 for (ntries = 0; ntries < 1000; ntries++) { 4631 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4632 break; 4633 DELAY(10); 4634 } 4635 4636 if (ntries == 1000) { 4637 device_printf(sc->sc_dev, 4638 "timeout waiting for thermal sensor calibration\n"); 4639 return ETIMEDOUT; 4640 } 4641 4642 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4643 return 0; 4644 } 4645 4646 /* 4647 * The firmware boot code is small and is intended to be copied directly into 4648 * the NIC internal memory (no DMA transfer). 4649 */ 4650 static int 4651 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size) 4652 { 4653 int error, ntries; 4654 4655 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4656 4657 size /= sizeof (uint32_t); 4658 4659 if ((error = wpi_nic_lock(sc)) != 0) 4660 return error; 4661 4662 /* Copy microcode image into NIC memory. */ 4663 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4664 (const uint32_t *)ucode, size); 4665 4666 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4667 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4668 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4669 4670 /* Start boot load now. */ 4671 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4672 4673 /* Wait for transfer to complete. 
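 * The loop below polls WPI_FH_TX_STATUS up to 1000 times with a 10 us
 * delay (roughly 10 ms in total) for the idle bit of DMA channel 6,
 * which seems to be the channel the BSM uses for this copy.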
*/ 4674 for (ntries = 0; ntries < 1000; ntries++) { 4675 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4676 DPRINTF(sc, WPI_DEBUG_HW, 4677 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4678 WPI_FH_TX_STATUS_IDLE(6), 4679 status & WPI_FH_TX_STATUS_IDLE(6)); 4680 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4681 DPRINTF(sc, WPI_DEBUG_HW, 4682 "Status Match! - ntries = %d\n", ntries); 4683 break; 4684 } 4685 DELAY(10); 4686 } 4687 if (ntries == 1000) { 4688 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4689 __func__); 4690 wpi_nic_unlock(sc); 4691 return ETIMEDOUT; 4692 } 4693 4694 /* Enable boot after power up. */ 4695 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4696 4697 wpi_nic_unlock(sc); 4698 return 0; 4699 } 4700 4701 static int 4702 wpi_load_firmware(struct wpi_softc *sc) 4703 { 4704 struct wpi_fw_info *fw = &sc->fw; 4705 struct wpi_dma_info *dma = &sc->fw_dma; 4706 int error; 4707 4708 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4709 4710 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4711 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4712 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4713 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4714 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4715 4716 /* Tell adapter where to find initialization sections. */ 4717 if ((error = wpi_nic_lock(sc)) != 0) 4718 return error; 4719 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4720 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4721 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4722 dma->paddr + WPI_FW_DATA_MAXSZ); 4723 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4724 wpi_nic_unlock(sc); 4725 4726 /* Load firmware boot code. */ 4727 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4728 if (error != 0) { 4729 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4730 __func__); 4731 return error; 4732 } 4733 4734 /* Now press "execute". */ 4735 WPI_WRITE(sc, WPI_RESET, 0); 4736 4737 /* Wait at most one second for first alive notification. */ 4738 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4739 device_printf(sc->sc_dev, 4740 "%s: timeout waiting for adapter to initialize, error %d\n", 4741 __func__, error); 4742 return error; 4743 } 4744 4745 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4746 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4747 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4748 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4749 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4750 4751 /* Tell adapter where to find runtime sections. 
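 * This mirrors the init-section setup above; the only difference is the
 * WPI_FW_UPDATED flag OR'ed into the text size, which presumably tells
 * the BSM that a new (runtime) image should replace the init image.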
*/
4752 if ((error = wpi_nic_lock(sc)) != 0)
4753 return error;
4754 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
4755 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz);
4756 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
4757 dma->paddr + WPI_FW_DATA_MAXSZ);
4758 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE,
4759 WPI_FW_UPDATED | fw->main.textsz);
4760 wpi_nic_unlock(sc);
4761
4762 return 0;
4763 }
4764
4765 static int
4766 wpi_read_firmware(struct wpi_softc *sc)
4767 {
4768 const struct firmware *fp;
4769 struct wpi_fw_info *fw = &sc->fw;
4770 const struct wpi_firmware_hdr *hdr;
4771 int error;
4772
4773 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4774
4775 DPRINTF(sc, WPI_DEBUG_FIRMWARE,
4776 "attempting to load firmware from module %s\n", WPI_FW_NAME);
4777
4778 WPI_UNLOCK(sc);
4779 fp = firmware_get(WPI_FW_NAME);
4780 WPI_LOCK(sc);
4781
4782 if (fp == NULL) {
4783 device_printf(sc->sc_dev,
4784 "could not load firmware image '%s'\n", WPI_FW_NAME);
4785 return EINVAL;
4786 }
4787
4788 sc->fw_fp = fp;
4789
4790 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
4791 device_printf(sc->sc_dev,
4792 "firmware file too short: %zu bytes\n", fp->datasize);
4793 error = EINVAL;
4794 goto fail;
4795 }
4796
4797 fw->size = fp->datasize;
4798 fw->data = (const uint8_t *)fp->data;
4799
4800 /* Extract firmware header information. */
4801 hdr = (const struct wpi_firmware_hdr *)fw->data;
4802
4803 /*     |  RUNTIME FIRMWARE   |    INIT FIRMWARE    | BOOT FW  |
4804    |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */
4805
4806 fw->main.textsz = le32toh(hdr->rtextsz);
4807 fw->main.datasz = le32toh(hdr->rdatasz);
4808 fw->init.textsz = le32toh(hdr->itextsz);
4809 fw->init.datasz = le32toh(hdr->idatasz);
4810 fw->boot.textsz = le32toh(hdr->btextsz);
4811 fw->boot.datasz = 0;
4812
4813 /* Sanity-check firmware header. */
4814 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ ||
4815 fw->main.datasz > WPI_FW_DATA_MAXSZ ||
4816 fw->init.textsz > WPI_FW_TEXT_MAXSZ ||
4817 fw->init.datasz > WPI_FW_DATA_MAXSZ ||
4818 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ ||
4819 (fw->boot.textsz & 3) != 0) {
4820 device_printf(sc->sc_dev, "invalid firmware header\n");
4821 error = EINVAL;
4822 goto fail;
4823 }
4824
4825 /* Check that all firmware sections fit. */
4826 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz +
4827 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
4828 device_printf(sc->sc_dev,
4829 "firmware file too short: %zu bytes\n", fw->size);
4830 error = EINVAL;
4831 goto fail;
4832 }
4833
4834 /* Get pointers to firmware sections.
*/ 4835 fw->main.text = (const uint8_t *)(hdr + 1); 4836 fw->main.data = fw->main.text + fw->main.textsz; 4837 fw->init.text = fw->main.data + fw->main.datasz; 4838 fw->init.data = fw->init.text + fw->init.textsz; 4839 fw->boot.text = fw->init.data + fw->init.datasz; 4840 4841 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4842 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 4843 "runtime (text: %u, data: %u) init (text: %u, data %u) " 4844 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 4845 fw->main.textsz, fw->main.datasz, 4846 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 4847 4848 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 4849 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 4850 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 4851 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 4852 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 4853 4854 return 0; 4855 4856 fail: wpi_unload_firmware(sc); 4857 return error; 4858 } 4859 4860 /** 4861 * Free the referenced firmware image 4862 */ 4863 static void 4864 wpi_unload_firmware(struct wpi_softc *sc) 4865 { 4866 if (sc->fw_fp != NULL) { 4867 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 4868 sc->fw_fp = NULL; 4869 } 4870 } 4871 4872 static int 4873 wpi_clock_wait(struct wpi_softc *sc) 4874 { 4875 int ntries; 4876 4877 /* Set "initialization complete" bit. */ 4878 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 4879 4880 /* Wait for clock stabilization. */ 4881 for (ntries = 0; ntries < 2500; ntries++) { 4882 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 4883 return 0; 4884 DELAY(100); 4885 } 4886 device_printf(sc->sc_dev, 4887 "%s: timeout waiting for clock stabilization\n", __func__); 4888 4889 return ETIMEDOUT; 4890 } 4891 4892 static int 4893 wpi_apm_init(struct wpi_softc *sc) 4894 { 4895 uint32_t reg; 4896 int error; 4897 4898 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4899 4900 /* Disable L0s exit timer (NMI bug workaround). */ 4901 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 4902 /* Don't wait for ICH L0s (ICH bug workaround). */ 4903 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 4904 4905 /* Set FH wait threshold to max (HW bug under stress workaround). */ 4906 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 4907 4908 /* Retrieve PCIe Active State Power Management (ASPM). */ 4909 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 4910 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 4911 if (reg & 0x02) /* L1 Entry enabled. */ 4912 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 4913 else 4914 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 4915 4916 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 4917 4918 /* Wait for clock stabilization before accessing prph. */ 4919 if ((error = wpi_clock_wait(sc)) != 0) 4920 return error; 4921 4922 if ((error = wpi_nic_lock(sc)) != 0) 4923 return error; 4924 /* Cleanup. */ 4925 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 4926 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 4927 4928 /* Enable DMA and BSM (Bootstrap State Machine). */ 4929 wpi_prph_write(sc, WPI_APMG_CLK_EN, 4930 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 4931 DELAY(20); 4932 /* Disable L1-Active. 
*/ 4933 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 4934 wpi_nic_unlock(sc); 4935 4936 return 0; 4937 } 4938 4939 static void 4940 wpi_apm_stop_master(struct wpi_softc *sc) 4941 { 4942 int ntries; 4943 4944 /* Stop busmaster DMA activity. */ 4945 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 4946 4947 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 4948 WPI_GP_CNTRL_MAC_PS) 4949 return; /* Already asleep. */ 4950 4951 for (ntries = 0; ntries < 100; ntries++) { 4952 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 4953 return; 4954 DELAY(10); 4955 } 4956 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 4957 __func__); 4958 } 4959 4960 static void 4961 wpi_apm_stop(struct wpi_softc *sc) 4962 { 4963 wpi_apm_stop_master(sc); 4964 4965 /* Reset the entire device. */ 4966 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 4967 DELAY(10); 4968 /* Clear "initialization complete" bit. */ 4969 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 4970 } 4971 4972 static void 4973 wpi_nic_config(struct wpi_softc *sc) 4974 { 4975 uint32_t rev; 4976 4977 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4978 4979 /* voodoo from the Linux "driver".. */ 4980 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 4981 if ((rev & 0xc0) == 0x40) 4982 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 4983 else if (!(rev & 0x80)) 4984 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 4985 4986 if (sc->cap == 0x80) 4987 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 4988 4989 if ((sc->rev & 0xf0) == 0xd0) 4990 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 4991 else 4992 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 4993 4994 if (sc->type > 1) 4995 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 4996 } 4997 4998 static int 4999 wpi_hw_init(struct wpi_softc *sc) 5000 { 5001 int chnl, ntries, error; 5002 5003 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5004 5005 /* Clear pending interrupts. */ 5006 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5007 5008 if ((error = wpi_apm_init(sc)) != 0) { 5009 device_printf(sc->sc_dev, 5010 "%s: could not power ON adapter, error %d\n", __func__, 5011 error); 5012 return error; 5013 } 5014 5015 /* Select VMAIN power source. */ 5016 if ((error = wpi_nic_lock(sc)) != 0) 5017 return error; 5018 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5019 wpi_nic_unlock(sc); 5020 /* Spin until VMAIN gets selected. */ 5021 for (ntries = 0; ntries < 5000; ntries++) { 5022 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5023 break; 5024 DELAY(10); 5025 } 5026 if (ntries == 5000) { 5027 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5028 return ETIMEDOUT; 5029 } 5030 5031 /* Perform adapter initialization. */ 5032 wpi_nic_config(sc); 5033 5034 /* Initialize RX ring. */ 5035 if ((error = wpi_nic_lock(sc)) != 0) 5036 return error; 5037 /* Set physical address of RX ring. */ 5038 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5039 /* Set physical address of RX read pointer. */ 5040 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5041 offsetof(struct wpi_shared, next)); 5042 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5043 /* Enable RX. 
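 * WPI_FH_RX_CONFIG_NRBD() apparently encodes the ring size as a power of
 * two (hence WPI_RX_RING_COUNT_LOG), and the write pointer programmed
 * after unlocking is kept 8-aligned via the "& ~7" mask.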
*/ 5044 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5045 WPI_FH_RX_CONFIG_DMA_ENA | 5046 WPI_FH_RX_CONFIG_RDRBD_ENA | 5047 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5048 WPI_FH_RX_CONFIG_MAXFRAG | 5049 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5050 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5051 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5052 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5053 wpi_nic_unlock(sc); 5054 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5055 5056 /* Initialize TX rings. */ 5057 if ((error = wpi_nic_lock(sc)) != 0) 5058 return error; 5059 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5060 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5061 /* Enable all 6 TX rings. */ 5062 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5063 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5064 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5065 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5066 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5067 /* Set physical address of TX rings. */ 5068 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5069 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5070 5071 /* Enable all DMA channels. */ 5072 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5073 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5074 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5075 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5076 } 5077 wpi_nic_unlock(sc); 5078 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5079 5080 /* Clear "radio off" and "commands blocked" bits. */ 5081 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5082 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5083 5084 /* Clear pending interrupts. */ 5085 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5086 /* Enable interrupts. */ 5087 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5088 5089 /* _Really_ make sure "radio off" bit is cleared! */ 5090 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5091 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5092 5093 if ((error = wpi_load_firmware(sc)) != 0) { 5094 device_printf(sc->sc_dev, 5095 "%s: could not load firmware, error %d\n", __func__, 5096 error); 5097 return error; 5098 } 5099 /* Wait at most one second for firmware alive notification. */ 5100 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5101 device_printf(sc->sc_dev, 5102 "%s: timeout waiting for adapter to initialize, error %d\n", 5103 __func__, error); 5104 return error; 5105 } 5106 5107 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5108 5109 /* Do post-firmware initialization. */ 5110 return wpi_post_alive(sc); 5111 } 5112 5113 static void 5114 wpi_hw_stop(struct wpi_softc *sc) 5115 { 5116 int chnl, qid, ntries; 5117 5118 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5119 5120 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5121 wpi_nic_lock(sc); 5122 5123 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5124 5125 /* Disable interrupts. */ 5126 WPI_WRITE(sc, WPI_INT_MASK, 0); 5127 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5128 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5129 5130 /* Make sure we no longer hold the NIC lock. */ 5131 wpi_nic_unlock(sc); 5132 5133 if (wpi_nic_lock(sc) == 0) { 5134 /* Stop TX scheduler. */ 5135 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5136 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5137 5138 /* Stop all DMA channels. 
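 * Each channel is disabled by clearing its config register; we then poll
 * WPI_FH_TX_STATUS for the channel's idle bit, giving it up to
 * 200 * 10 us (about 2 ms) to drain.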
*/ 5139 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5140 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5141 for (ntries = 0; ntries < 200; ntries++) { 5142 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5143 WPI_FH_TX_STATUS_IDLE(chnl)) 5144 break; 5145 DELAY(10); 5146 } 5147 } 5148 wpi_nic_unlock(sc); 5149 } 5150 5151 /* Stop RX ring. */ 5152 wpi_reset_rx_ring(sc); 5153 5154 /* Reset all TX rings. */ 5155 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 5156 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5157 5158 if (wpi_nic_lock(sc) == 0) { 5159 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5160 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5161 wpi_nic_unlock(sc); 5162 } 5163 DELAY(5); 5164 /* Power OFF adapter. */ 5165 wpi_apm_stop(sc); 5166 } 5167 5168 static void 5169 wpi_radio_on(void *arg0, int pending) 5170 { 5171 struct wpi_softc *sc = arg0; 5172 struct ifnet *ifp = sc->sc_ifp; 5173 struct ieee80211com *ic = ifp->if_l2com; 5174 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5175 5176 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5177 5178 if (vap != NULL) { 5179 wpi_init(sc); 5180 ieee80211_init(vap); 5181 } 5182 5183 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL) { 5184 WPI_LOCK(sc); 5185 callout_stop(&sc->watchdog_rfkill); 5186 WPI_UNLOCK(sc); 5187 } 5188 } 5189 5190 static void 5191 wpi_radio_off(void *arg0, int pending) 5192 { 5193 struct wpi_softc *sc = arg0; 5194 struct ifnet *ifp = sc->sc_ifp; 5195 struct ieee80211com *ic = ifp->if_l2com; 5196 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5197 5198 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5199 5200 wpi_stop(sc); 5201 if (vap != NULL) 5202 ieee80211_stop(vap); 5203 5204 WPI_LOCK(sc); 5205 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5206 WPI_UNLOCK(sc); 5207 } 5208 5209 static void 5210 wpi_init(void *arg) 5211 { 5212 struct wpi_softc *sc = arg; 5213 struct ifnet *ifp = sc->sc_ifp; 5214 struct ieee80211com *ic = ifp->if_l2com; 5215 int error; 5216 5217 WPI_LOCK(sc); 5218 5219 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5220 5221 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 5222 goto end; 5223 5224 /* Check that the radio is not disabled by hardware switch. */ 5225 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5226 device_printf(sc->sc_dev, 5227 "RF switch: radio disabled (%s)\n", __func__); 5228 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5229 sc); 5230 goto end; 5231 } 5232 5233 /* Read firmware images from the filesystem. */ 5234 if ((error = wpi_read_firmware(sc)) != 0) { 5235 device_printf(sc->sc_dev, 5236 "%s: could not read firmware, error %d\n", __func__, 5237 error); 5238 goto fail; 5239 } 5240 5241 /* Initialize hardware and upload firmware. */ 5242 error = wpi_hw_init(sc); 5243 wpi_unload_firmware(sc); 5244 if (error != 0) { 5245 device_printf(sc->sc_dev, 5246 "%s: could not initialize hardware, error %d\n", __func__, 5247 error); 5248 goto fail; 5249 } 5250 5251 /* Configure adapter now that it is ready. 
*/ 5252 sc->txq_active = 1; 5253 if ((error = wpi_config(sc)) != 0) { 5254 device_printf(sc->sc_dev, 5255 "%s: could not configure device, error %d\n", __func__, 5256 error); 5257 goto fail; 5258 } 5259 5260 IF_LOCK(&ifp->if_snd); 5261 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5262 ifp->if_drv_flags |= IFF_DRV_RUNNING; 5263 IF_UNLOCK(&ifp->if_snd); 5264 5265 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5266 5267 WPI_UNLOCK(sc); 5268 5269 ieee80211_start_all(ic); 5270 5271 return; 5272 5273 fail: wpi_stop_locked(sc); 5274 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5275 WPI_UNLOCK(sc); 5276 } 5277 5278 static void 5279 wpi_stop_locked(struct wpi_softc *sc) 5280 { 5281 struct ifnet *ifp = sc->sc_ifp; 5282 5283 WPI_LOCK_ASSERT(sc); 5284 5285 WPI_TXQ_LOCK(sc); 5286 sc->txq_active = 0; 5287 WPI_TXQ_UNLOCK(sc); 5288 5289 WPI_TXQ_STATE_LOCK(sc); 5290 callout_stop(&sc->tx_timeout); 5291 WPI_TXQ_STATE_UNLOCK(sc); 5292 5293 WPI_RXON_LOCK(sc); 5294 callout_stop(&sc->scan_timeout); 5295 callout_stop(&sc->calib_to); 5296 WPI_RXON_UNLOCK(sc); 5297 5298 IF_LOCK(&ifp->if_snd); 5299 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 5300 IF_UNLOCK(&ifp->if_snd); 5301 5302 /* Power OFF hardware. */ 5303 wpi_hw_stop(sc); 5304 } 5305 5306 static void 5307 wpi_stop(struct wpi_softc *sc) 5308 { 5309 WPI_LOCK(sc); 5310 wpi_stop_locked(sc); 5311 WPI_UNLOCK(sc); 5312 } 5313 5314 /* 5315 * Callback from net80211 to start a scan. 5316 */ 5317 static void 5318 wpi_scan_start(struct ieee80211com *ic) 5319 { 5320 struct wpi_softc *sc = ic->ic_ifp->if_softc; 5321 5322 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5323 } 5324 5325 /* 5326 * Callback from net80211 to terminate a scan. 5327 */ 5328 static void 5329 wpi_scan_end(struct ieee80211com *ic) 5330 { 5331 struct ifnet *ifp = ic->ic_ifp; 5332 struct wpi_softc *sc = ifp->if_softc; 5333 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5334 5335 if (vap->iv_state == IEEE80211_S_RUN) 5336 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5337 } 5338 5339 /** 5340 * Called by the net80211 framework to indicate to the driver 5341 * that the channel should be changed 5342 */ 5343 static void 5344 wpi_set_channel(struct ieee80211com *ic) 5345 { 5346 const struct ieee80211_channel *c = ic->ic_curchan; 5347 struct ifnet *ifp = ic->ic_ifp; 5348 struct wpi_softc *sc = ifp->if_softc; 5349 int error; 5350 5351 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5352 5353 WPI_LOCK(sc); 5354 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5355 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5356 WPI_UNLOCK(sc); 5357 WPI_TX_LOCK(sc); 5358 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5359 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5360 WPI_TX_UNLOCK(sc); 5361 5362 /* 5363 * Only need to set the channel in Monitor mode. AP scanning and auth 5364 * are already taken care of by their respective firmware commands. 5365 */ 5366 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5367 WPI_RXON_LOCK(sc); 5368 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5369 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5370 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5371 WPI_RXON_24GHZ); 5372 } else { 5373 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5374 WPI_RXON_24GHZ); 5375 } 5376 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5377 device_printf(sc->sc_dev, 5378 "%s: error %d setting channel\n", __func__, 5379 error); 5380 WPI_RXON_UNLOCK(sc); 5381 } 5382 } 5383 5384 /** 5385 * Called by net80211 to indicate that we need to scan the current 5386 * channel. 
The channel is previously be set via the wpi_set_channel 5387 * callback. 5388 */ 5389 static void 5390 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5391 { 5392 struct ieee80211vap *vap = ss->ss_vap; 5393 struct ieee80211com *ic = vap->iv_ic; 5394 struct wpi_softc *sc = ic->ic_ifp->if_softc; 5395 int error; 5396 5397 WPI_RXON_LOCK(sc); 5398 if (sc->rxon.chan != ieee80211_chan2ieee(ic, ic->ic_curchan)) { 5399 error = wpi_scan(sc, ic->ic_curchan); 5400 WPI_RXON_UNLOCK(sc); 5401 if (error != 0) 5402 ieee80211_cancel_scan(vap); 5403 } else { 5404 WPI_RXON_UNLOCK(sc); 5405 /* Send probe request when associated. */ 5406 sc->sc_scan_curchan(ss, maxdwell); 5407 } 5408 } 5409 5410 /** 5411 * Called by the net80211 framework to indicate 5412 * the minimum dwell time has been met, terminate the scan. 5413 * We don't actually terminate the scan as the firmware will notify 5414 * us when it's finished and we have no way to interrupt it. 5415 */ 5416 static void 5417 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5418 { 5419 /* NB: don't try to abort scan; wait for firmware to finish */ 5420 } 5421 5422 static void 5423 wpi_hw_reset(void *arg, int pending) 5424 { 5425 struct wpi_softc *sc = arg; 5426 struct ifnet *ifp = sc->sc_ifp; 5427 struct ieee80211com *ic = ifp->if_l2com; 5428 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5429 5430 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5431 5432 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN)) 5433 ieee80211_cancel_scan(vap); 5434 5435 wpi_stop(sc); 5436 if (vap != NULL) 5437 ieee80211_stop(vap); 5438 wpi_init(sc); 5439 if (vap != NULL) 5440 ieee80211_init(vap); 5441 } 5442