/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware as many
 * other adapters do. Instead, at run time the eeprom is set into a known
 * state and told to load the boot firmware. The boot firmware loads an
 * init and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and the hardware communicate
 * with the firmware by way of circular DMA rings in the SRAM.
 *
 * There are 6 memory rings: 1 command ring, 1 rx data ring and 4 tx data
 * rings. The 4 tx data rings allow for QoS prioritization.
 *
 * The rx data ring consists of 32 dma buffers. Two registers are used to
 * indicate how far into the ring the driver and the firmware have
 * progressed. The driver sets the initial read index (reg1) and the
 * initial write index (reg2); the firmware updates the read index (reg1)
 * on receipt of a packet and fires an interrupt. The driver then
 * processes the buffers starting at reg1, indicating to the firmware
 * which buffers have been accessed by updating reg2, while at the same
 * time allocating new memory for the processed buffers.
 *
 * A similar thing happens with the tx rings. The difference is that the
 * firmware stops processing buffers once the queue is full, until
 * confirmation of a successful transmission (tx_done) has occurred.
 *
 * The command ring operates in the same manner as the tx queues.
 *
 * All communication directly with the card (i.e. the eeprom) is classed
 * as Stage 1 communication.
 *
 * All communication with the card via the firmware is classed as Stage 2.
 * The firmware consists of 2 parts: a bootstrap firmware and a runtime
 * firmware. The bootstrap firmware and runtime firmware are loaded from
 * host memory via DMA to the card and then told to execute. From this
 * point on the majority of communication between the driver and the card
 * goes via the firmware.
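 *
 * As a rough illustrative sketch only (not the literal code below), the
 * rx ring handshake described above amounts to the following, where reg1
 * and reg2 stand for the firmware read index and the driver write index:
 *
 *	on rx interrupt:
 *		while (driver index != reg1) {
 *			pass buffer[driver index] up the stack;
 *			attach a fresh mbuf in its place;
 *			advance the driver index;
 *		}
 *		write the new driver index back to reg2 so the firmware
 *		can reuse the processed slots (see wpi_update_rx_ring()).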
 */

#include "opt_wlan.h"
#include "opt_wpi.h"

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
#include <dev/wpi/if_wpi_debug.h>

struct wpi_ident {
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subdevice;
	const char	*name;
};

static const struct wpi_ident wpi_ident_table[] = {
	/* The below entries support ABG regardless of the subid */
	{ 0x8086, 0x4222,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	{ 0x8086, 0x4227,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	/* The below entries only support BG */
	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
	{ 0, 0, 0, NULL }
};

static int	wpi_probe(device_t);
static int	wpi_attach(device_t);
static void	wpi_radiotap_attach(struct wpi_softc *);
static void	wpi_sysctlattach(struct wpi_softc *);
static void	wpi_init_beacon(struct wpi_vap *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	wpi_vap_delete(struct ieee80211vap *);
static int	wpi_detach(device_t);
static int	wpi_shutdown(device_t);
static int	wpi_suspend(device_t);
static int	wpi_resume(device_t);
static int	wpi_nic_lock(struct wpi_softc *);
static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	wpi_dma_contig_free(struct wpi_dma_info *);
static int	wpi_alloc_shared(struct wpi_softc *);
static void	wpi_free_shared(struct wpi_softc *);
static int	wpi_alloc_fwmem(struct wpi_softc *);
static void	wpi_free_fwmem(struct wpi_softc *);
static int	wpi_alloc_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring_ps(struct wpi_softc *);
static void	wpi_reset_rx_ring(struct wpi_softc *);
static void	wpi_free_rx_ring(struct wpi_softc *);
static int	wpi_alloc_tx_ring(struct wpi_softc *,
struct wpi_tx_ring *, 159 int); 160 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_update_tx_ring_ps(struct wpi_softc *, 162 struct wpi_tx_ring *); 163 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 164 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static int wpi_read_eeprom(struct wpi_softc *, 166 uint8_t macaddr[IEEE80211_ADDR_LEN]); 167 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 168 static void wpi_read_eeprom_band(struct wpi_softc *, int); 169 static int wpi_read_eeprom_channels(struct wpi_softc *, int); 170 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 171 struct ieee80211_channel *); 172 static int wpi_setregdomain(struct ieee80211com *, 173 struct ieee80211_regdomain *, int, 174 struct ieee80211_channel[]); 175 static int wpi_read_eeprom_group(struct wpi_softc *, int); 176 static int wpi_add_node_entry_adhoc(struct wpi_softc *); 177 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 178 const uint8_t mac[IEEE80211_ADDR_LEN]); 179 static void wpi_node_free(struct ieee80211_node *); 180 static void wpi_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 181 const struct ieee80211_rx_stats *, 182 int, int); 183 static void wpi_restore_node(void *, struct ieee80211_node *); 184 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 185 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 186 static void wpi_calib_timeout(void *); 187 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 188 struct wpi_rx_data *); 189 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 190 struct wpi_rx_data *); 191 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 192 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 193 static void wpi_notif_intr(struct wpi_softc *); 194 static void wpi_wakeup_intr(struct wpi_softc *); 195 #ifdef WPI_DEBUG 196 static void wpi_debug_registers(struct wpi_softc *); 197 #endif 198 static void wpi_fatal_intr(struct wpi_softc *); 199 static void wpi_intr(void *); 200 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 201 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 202 struct ieee80211_node *); 203 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 204 struct ieee80211_node *, 205 const struct ieee80211_bpf_params *); 206 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 207 const struct ieee80211_bpf_params *); 208 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 209 static void wpi_watchdog_rfkill(void *); 210 static void wpi_scan_timeout(void *); 211 static void wpi_tx_timeout(void *); 212 static void wpi_parent(struct ieee80211com *); 213 static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 214 static int wpi_mrr_setup(struct wpi_softc *); 215 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 216 static int wpi_add_broadcast_node(struct wpi_softc *, int); 217 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 218 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 219 static int wpi_updateedca(struct ieee80211com *); 220 static void wpi_set_promisc(struct wpi_softc *); 221 static void wpi_update_promisc(struct ieee80211com *); 222 static void wpi_update_mcast(struct ieee80211com *); 223 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, 
uint8_t); 224 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 225 static void wpi_power_calibration(struct wpi_softc *); 226 static int wpi_set_txpower(struct wpi_softc *, int); 227 static int wpi_get_power_index(struct wpi_softc *, 228 struct wpi_power_group *, uint8_t, int, int); 229 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 230 static int wpi_send_btcoex(struct wpi_softc *); 231 static int wpi_send_rxon(struct wpi_softc *, int, int); 232 static int wpi_config(struct wpi_softc *); 233 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 234 struct ieee80211_channel *, uint8_t); 235 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 236 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 237 struct ieee80211_channel *); 238 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 239 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 240 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 241 static int wpi_config_beacon(struct wpi_vap *); 242 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 243 static void wpi_update_beacon(struct ieee80211vap *, int); 244 static void wpi_newassoc(struct ieee80211_node *, int); 245 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 246 static int wpi_load_key(struct ieee80211_node *, 247 const struct ieee80211_key *); 248 static void wpi_load_key_cb(void *, struct ieee80211_node *); 249 static int wpi_set_global_keys(struct ieee80211_node *); 250 static int wpi_del_key(struct ieee80211_node *, 251 const struct ieee80211_key *); 252 static void wpi_del_key_cb(void *, struct ieee80211_node *); 253 static int wpi_process_key(struct ieee80211vap *, 254 const struct ieee80211_key *, int); 255 static int wpi_key_set(struct ieee80211vap *, 256 const struct ieee80211_key *); 257 static int wpi_key_delete(struct ieee80211vap *, 258 const struct ieee80211_key *); 259 static int wpi_post_alive(struct wpi_softc *); 260 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 261 static int wpi_load_firmware(struct wpi_softc *); 262 static int wpi_read_firmware(struct wpi_softc *); 263 static void wpi_unload_firmware(struct wpi_softc *); 264 static int wpi_clock_wait(struct wpi_softc *); 265 static int wpi_apm_init(struct wpi_softc *); 266 static void wpi_apm_stop_master(struct wpi_softc *); 267 static void wpi_apm_stop(struct wpi_softc *); 268 static void wpi_nic_config(struct wpi_softc *); 269 static int wpi_hw_init(struct wpi_softc *); 270 static void wpi_hw_stop(struct wpi_softc *); 271 static void wpi_radio_on(void *, int); 272 static void wpi_radio_off(void *, int); 273 static int wpi_init(struct wpi_softc *); 274 static void wpi_stop_locked(struct wpi_softc *); 275 static void wpi_stop(struct wpi_softc *); 276 static void wpi_scan_start(struct ieee80211com *); 277 static void wpi_scan_end(struct ieee80211com *); 278 static void wpi_set_channel(struct ieee80211com *); 279 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 280 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 281 static void wpi_hw_reset(void *, int); 282 283 static device_method_t wpi_methods[] = { 284 /* Device interface */ 285 DEVMETHOD(device_probe, wpi_probe), 286 DEVMETHOD(device_attach, wpi_attach), 287 DEVMETHOD(device_detach, wpi_detach), 288 DEVMETHOD(device_shutdown, wpi_shutdown), 289 DEVMETHOD(device_suspend, wpi_suspend), 290 DEVMETHOD(device_resume, wpi_resume), 291 292 
DEVMETHOD_END 293 }; 294 295 static driver_t wpi_driver = { 296 "wpi", 297 wpi_methods, 298 sizeof (struct wpi_softc) 299 }; 300 static devclass_t wpi_devclass; 301 302 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 303 304 MODULE_VERSION(wpi, 1); 305 306 MODULE_DEPEND(wpi, pci, 1, 1, 1); 307 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 308 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 309 310 static int 311 wpi_probe(device_t dev) 312 { 313 const struct wpi_ident *ident; 314 315 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 316 if (pci_get_vendor(dev) == ident->vendor && 317 pci_get_device(dev) == ident->device) { 318 device_set_desc(dev, ident->name); 319 return (BUS_PROBE_DEFAULT); 320 } 321 } 322 return ENXIO; 323 } 324 325 static int 326 wpi_attach(device_t dev) 327 { 328 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 329 struct ieee80211com *ic; 330 int i, error, rid; 331 #ifdef WPI_DEBUG 332 int supportsa = 1; 333 const struct wpi_ident *ident; 334 #endif 335 336 sc->sc_dev = dev; 337 338 #ifdef WPI_DEBUG 339 error = resource_int_value(device_get_name(sc->sc_dev), 340 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 341 if (error != 0) 342 sc->sc_debug = 0; 343 #else 344 sc->sc_debug = 0; 345 #endif 346 347 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 348 349 /* 350 * Get the offset of the PCI Express Capability Structure in PCI 351 * Configuration Space. 352 */ 353 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 354 if (error != 0) { 355 device_printf(dev, "PCIe capability structure not found!\n"); 356 return error; 357 } 358 359 /* 360 * Some card's only support 802.11b/g not a, check to see if 361 * this is one such card. A 0x0 in the subdevice table indicates 362 * the entire subdevice range is to be ignored. 363 */ 364 #ifdef WPI_DEBUG 365 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 366 if (ident->subdevice && 367 pci_get_subdevice(dev) == ident->subdevice) { 368 supportsa = 0; 369 break; 370 } 371 } 372 #endif 373 374 /* Clear device-specific "PCI retry timeout" register (41h). */ 375 pci_write_config(dev, 0x41, 0, 1); 376 377 /* Enable bus-mastering. */ 378 pci_enable_busmaster(dev); 379 380 rid = PCIR_BAR(0); 381 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 382 RF_ACTIVE); 383 if (sc->mem == NULL) { 384 device_printf(dev, "can't map mem space\n"); 385 return ENOMEM; 386 } 387 sc->sc_st = rman_get_bustag(sc->mem); 388 sc->sc_sh = rman_get_bushandle(sc->mem); 389 390 i = 1; 391 rid = 0; 392 if (pci_alloc_msi(dev, &i) == 0) 393 rid = 1; 394 /* Install interrupt handler. */ 395 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 396 (rid != 0 ? 0 : RF_SHAREABLE)); 397 if (sc->irq == NULL) { 398 device_printf(dev, "can't map interrupt\n"); 399 error = ENOMEM; 400 goto fail; 401 } 402 403 WPI_LOCK_INIT(sc); 404 WPI_TX_LOCK_INIT(sc); 405 WPI_RXON_LOCK_INIT(sc); 406 WPI_NT_LOCK_INIT(sc); 407 WPI_TXQ_LOCK_INIT(sc); 408 WPI_TXQ_STATE_LOCK_INIT(sc); 409 410 /* Allocate DMA memory for firmware transfers. */ 411 if ((error = wpi_alloc_fwmem(sc)) != 0) { 412 device_printf(dev, 413 "could not allocate memory for firmware, error %d\n", 414 error); 415 goto fail; 416 } 417 418 /* Allocate shared page. */ 419 if ((error = wpi_alloc_shared(sc)) != 0) { 420 device_printf(dev, "could not allocate shared page\n"); 421 goto fail; 422 } 423 424 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. 
*/ 425 for (i = 0; i < WPI_NTXQUEUES; i++) { 426 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 427 device_printf(dev, 428 "could not allocate TX ring %d, error %d\n", i, 429 error); 430 goto fail; 431 } 432 } 433 434 /* Allocate RX ring. */ 435 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 436 device_printf(dev, "could not allocate RX ring, error %d\n", 437 error); 438 goto fail; 439 } 440 441 /* Clear pending interrupts. */ 442 WPI_WRITE(sc, WPI_INT, 0xffffffff); 443 444 ic = &sc->sc_ic; 445 ic->ic_softc = sc; 446 ic->ic_name = device_get_nameunit(dev); 447 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 448 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 449 450 /* Set device capabilities. */ 451 ic->ic_caps = 452 IEEE80211_C_STA /* station mode supported */ 453 | IEEE80211_C_IBSS /* IBSS mode supported */ 454 | IEEE80211_C_HOSTAP /* Host access point mode */ 455 | IEEE80211_C_MONITOR /* monitor mode supported */ 456 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 457 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 458 | IEEE80211_C_TXPMGT /* tx power management */ 459 | IEEE80211_C_SHSLOT /* short slot time supported */ 460 | IEEE80211_C_WPA /* 802.11i */ 461 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 462 | IEEE80211_C_WME /* 802.11e */ 463 | IEEE80211_C_PMGT /* Station-side power mgmt */ 464 ; 465 466 ic->ic_cryptocaps = 467 IEEE80211_CRYPTO_AES_CCM; 468 469 /* 470 * Read in the eeprom and also setup the channels for 471 * net80211. We don't set the rates as net80211 does this for us 472 */ 473 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 474 device_printf(dev, "could not read EEPROM, error %d\n", 475 error); 476 goto fail; 477 } 478 479 #ifdef WPI_DEBUG 480 if (bootverbose) { 481 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 482 sc->domain); 483 device_printf(sc->sc_dev, "Hardware Type: %c\n", 484 sc->type > 1 ? 'B': '?'); 485 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 486 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 487 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 488 supportsa ? "does" : "does not"); 489 490 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 491 check what sc->rev really represents - benjsc 20070615 */ 492 } 493 #endif 494 495 ieee80211_ifattach(ic); 496 ic->ic_vap_create = wpi_vap_create; 497 ic->ic_vap_delete = wpi_vap_delete; 498 ic->ic_parent = wpi_parent; 499 ic->ic_raw_xmit = wpi_raw_xmit; 500 ic->ic_transmit = wpi_transmit; 501 ic->ic_node_alloc = wpi_node_alloc; 502 sc->sc_node_free = ic->ic_node_free; 503 ic->ic_node_free = wpi_node_free; 504 ic->ic_wme.wme_update = wpi_updateedca; 505 ic->ic_update_promisc = wpi_update_promisc; 506 ic->ic_update_mcast = wpi_update_mcast; 507 ic->ic_newassoc = wpi_newassoc; 508 ic->ic_scan_start = wpi_scan_start; 509 ic->ic_scan_end = wpi_scan_end; 510 ic->ic_set_channel = wpi_set_channel; 511 ic->ic_scan_curchan = wpi_scan_curchan; 512 ic->ic_scan_mindwell = wpi_scan_mindwell; 513 ic->ic_setregdomain = wpi_setregdomain; 514 515 sc->sc_update_rx_ring = wpi_update_rx_ring; 516 sc->sc_update_tx_ring = wpi_update_tx_ring; 517 518 wpi_radiotap_attach(sc); 519 520 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 521 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 522 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 523 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 524 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 525 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 526 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 527 528 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 529 taskqueue_thread_enqueue, &sc->sc_tq); 530 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 531 if (error != 0) { 532 device_printf(dev, "can't start threads, error %d\n", error); 533 goto fail; 534 } 535 536 wpi_sysctlattach(sc); 537 538 /* 539 * Hook our interrupt after all initialization is complete. 540 */ 541 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 542 NULL, wpi_intr, sc, &sc->sc_ih); 543 if (error != 0) { 544 device_printf(dev, "can't establish interrupt, error %d\n", 545 error); 546 goto fail; 547 } 548 549 if (bootverbose) 550 ieee80211_announce(ic); 551 552 #ifdef WPI_DEBUG 553 if (sc->sc_debug & WPI_DEBUG_HW) 554 ieee80211_announce_channels(ic); 555 #endif 556 557 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 558 return 0; 559 560 fail: wpi_detach(dev); 561 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 562 return error; 563 } 564 565 /* 566 * Attach the interface to 802.11 radiotap. 
567 */ 568 static void 569 wpi_radiotap_attach(struct wpi_softc *sc) 570 { 571 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 572 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 573 574 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 575 ieee80211_radiotap_attach(&sc->sc_ic, 576 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 577 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 578 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 579 } 580 581 static void 582 wpi_sysctlattach(struct wpi_softc *sc) 583 { 584 #ifdef WPI_DEBUG 585 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 586 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 587 588 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 589 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 590 "control debugging printfs"); 591 #endif 592 } 593 594 static void 595 wpi_init_beacon(struct wpi_vap *wvp) 596 { 597 struct wpi_buf *bcn = &wvp->wv_bcbuf; 598 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 599 600 cmd->id = WPI_ID_BROADCAST; 601 cmd->ofdm_mask = 0xff; 602 cmd->cck_mask = 0x0f; 603 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 604 605 /* 606 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 607 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 608 */ 609 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 610 611 bcn->code = WPI_CMD_SET_BEACON; 612 bcn->ac = WPI_CMD_QUEUE_NUM; 613 bcn->size = sizeof(struct wpi_cmd_beacon); 614 } 615 616 static struct ieee80211vap * 617 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 618 enum ieee80211_opmode opmode, int flags, 619 const uint8_t bssid[IEEE80211_ADDR_LEN], 620 const uint8_t mac[IEEE80211_ADDR_LEN]) 621 { 622 struct wpi_vap *wvp; 623 struct ieee80211vap *vap; 624 625 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 626 return NULL; 627 628 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 629 vap = &wvp->wv_vap; 630 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 631 632 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 633 WPI_VAP_LOCK_INIT(wvp); 634 wpi_init_beacon(wvp); 635 } 636 637 /* Override with driver methods. */ 638 vap->iv_key_set = wpi_key_set; 639 vap->iv_key_delete = wpi_key_delete; 640 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 641 vap->iv_recv_mgmt = wpi_recv_mgmt; 642 wvp->wv_newstate = vap->iv_newstate; 643 vap->iv_newstate = wpi_newstate; 644 vap->iv_update_beacon = wpi_update_beacon; 645 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 646 647 ieee80211_ratectl_init(vap); 648 /* Complete setup. 
*/ 649 ieee80211_vap_attach(vap, ieee80211_media_change, 650 ieee80211_media_status, mac); 651 ic->ic_opmode = opmode; 652 return vap; 653 } 654 655 static void 656 wpi_vap_delete(struct ieee80211vap *vap) 657 { 658 struct wpi_vap *wvp = WPI_VAP(vap); 659 struct wpi_buf *bcn = &wvp->wv_bcbuf; 660 enum ieee80211_opmode opmode = vap->iv_opmode; 661 662 ieee80211_ratectl_deinit(vap); 663 ieee80211_vap_detach(vap); 664 665 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 666 if (bcn->m != NULL) 667 m_freem(bcn->m); 668 669 WPI_VAP_LOCK_DESTROY(wvp); 670 } 671 672 free(wvp, M_80211_VAP); 673 } 674 675 static int 676 wpi_detach(device_t dev) 677 { 678 struct wpi_softc *sc = device_get_softc(dev); 679 struct ieee80211com *ic = &sc->sc_ic; 680 int qid; 681 682 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 683 684 if (ic->ic_vap_create == wpi_vap_create) { 685 ieee80211_draintask(ic, &sc->sc_radioon_task); 686 687 wpi_stop(sc); 688 689 if (sc->sc_tq != NULL) { 690 taskqueue_drain_all(sc->sc_tq); 691 taskqueue_free(sc->sc_tq); 692 } 693 694 callout_drain(&sc->watchdog_rfkill); 695 callout_drain(&sc->tx_timeout); 696 callout_drain(&sc->scan_timeout); 697 callout_drain(&sc->calib_to); 698 ieee80211_ifdetach(ic); 699 } 700 701 /* Uninstall interrupt handler. */ 702 if (sc->irq != NULL) { 703 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 704 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 705 sc->irq); 706 pci_release_msi(dev); 707 } 708 709 if (sc->txq[0].data_dmat) { 710 /* Free DMA resources. */ 711 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 712 wpi_free_tx_ring(sc, &sc->txq[qid]); 713 714 wpi_free_rx_ring(sc); 715 wpi_free_shared(sc); 716 } 717 718 if (sc->fw_dma.tag) 719 wpi_free_fwmem(sc); 720 721 if (sc->mem != NULL) 722 bus_release_resource(dev, SYS_RES_MEMORY, 723 rman_get_rid(sc->mem), sc->mem); 724 725 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 726 WPI_TXQ_STATE_LOCK_DESTROY(sc); 727 WPI_TXQ_LOCK_DESTROY(sc); 728 WPI_NT_LOCK_DESTROY(sc); 729 WPI_RXON_LOCK_DESTROY(sc); 730 WPI_TX_LOCK_DESTROY(sc); 731 WPI_LOCK_DESTROY(sc); 732 return 0; 733 } 734 735 static int 736 wpi_shutdown(device_t dev) 737 { 738 struct wpi_softc *sc = device_get_softc(dev); 739 740 wpi_stop(sc); 741 return 0; 742 } 743 744 static int 745 wpi_suspend(device_t dev) 746 { 747 struct wpi_softc *sc = device_get_softc(dev); 748 struct ieee80211com *ic = &sc->sc_ic; 749 750 ieee80211_suspend_all(ic); 751 return 0; 752 } 753 754 static int 755 wpi_resume(device_t dev) 756 { 757 struct wpi_softc *sc = device_get_softc(dev); 758 struct ieee80211com *ic = &sc->sc_ic; 759 760 /* Clear device-specific "PCI retry timeout" register (41h). */ 761 pci_write_config(dev, 0x41, 0, 1); 762 763 ieee80211_resume_all(ic); 764 return 0; 765 } 766 767 /* 768 * Grab exclusive access to NIC memory. 769 */ 770 static int 771 wpi_nic_lock(struct wpi_softc *sc) 772 { 773 int ntries; 774 775 /* Request exclusive access to NIC. */ 776 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 777 778 /* Spin until we actually get the lock. */ 779 for (ntries = 0; ntries < 1000; ntries++) { 780 if ((WPI_READ(sc, WPI_GP_CNTRL) & 781 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 782 WPI_GP_CNTRL_MAC_ACCESS_ENA) 783 return 0; 784 DELAY(10); 785 } 786 787 device_printf(sc->sc_dev, "could not lock memory\n"); 788 789 return ETIMEDOUT; 790 } 791 792 /* 793 * Release lock on NIC memory. 
794 */ 795 static __inline void 796 wpi_nic_unlock(struct wpi_softc *sc) 797 { 798 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 799 } 800 801 static __inline uint32_t 802 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 803 { 804 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 805 WPI_BARRIER_READ_WRITE(sc); 806 return WPI_READ(sc, WPI_PRPH_RDATA); 807 } 808 809 static __inline void 810 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 811 { 812 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 813 WPI_BARRIER_WRITE(sc); 814 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 815 } 816 817 static __inline void 818 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 819 { 820 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 821 } 822 823 static __inline void 824 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 825 { 826 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 827 } 828 829 static __inline void 830 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 831 const uint32_t *data, int count) 832 { 833 for (; count > 0; count--, data++, addr += 4) 834 wpi_prph_write(sc, addr, *data); 835 } 836 837 static __inline uint32_t 838 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 839 { 840 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 841 WPI_BARRIER_READ_WRITE(sc); 842 return WPI_READ(sc, WPI_MEM_RDATA); 843 } 844 845 static __inline void 846 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 847 int count) 848 { 849 for (; count > 0; count--, addr += 4) 850 *data++ = wpi_mem_read(sc, addr); 851 } 852 853 static int 854 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 855 { 856 uint8_t *out = data; 857 uint32_t val; 858 int error, ntries; 859 860 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 861 862 if ((error = wpi_nic_lock(sc)) != 0) 863 return error; 864 865 for (; count > 0; count -= 2, addr++) { 866 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 867 for (ntries = 0; ntries < 10; ntries++) { 868 val = WPI_READ(sc, WPI_EEPROM); 869 if (val & WPI_EEPROM_READ_VALID) 870 break; 871 DELAY(5); 872 } 873 if (ntries == 10) { 874 device_printf(sc->sc_dev, 875 "timeout reading ROM at 0x%x\n", addr); 876 return ETIMEDOUT; 877 } 878 *out++= val >> 16; 879 if (count > 1) 880 *out ++= val >> 24; 881 } 882 883 wpi_nic_unlock(sc); 884 885 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 886 887 return 0; 888 } 889 890 static void 891 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 892 { 893 if (error != 0) 894 return; 895 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 896 *(bus_addr_t *)arg = segs[0].ds_addr; 897 } 898 899 /* 900 * Allocates a contiguous block of dma memory of the requested size and 901 * alignment. 
902 */ 903 static int 904 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 905 void **kvap, bus_size_t size, bus_size_t alignment) 906 { 907 int error; 908 909 dma->tag = NULL; 910 dma->size = size; 911 912 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 913 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 914 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 915 if (error != 0) 916 goto fail; 917 918 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 919 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 920 if (error != 0) 921 goto fail; 922 923 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 924 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 925 if (error != 0) 926 goto fail; 927 928 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 929 930 if (kvap != NULL) 931 *kvap = dma->vaddr; 932 933 return 0; 934 935 fail: wpi_dma_contig_free(dma); 936 return error; 937 } 938 939 static void 940 wpi_dma_contig_free(struct wpi_dma_info *dma) 941 { 942 if (dma->vaddr != NULL) { 943 bus_dmamap_sync(dma->tag, dma->map, 944 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 945 bus_dmamap_unload(dma->tag, dma->map); 946 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 947 dma->vaddr = NULL; 948 } 949 if (dma->tag != NULL) { 950 bus_dma_tag_destroy(dma->tag); 951 dma->tag = NULL; 952 } 953 } 954 955 /* 956 * Allocate a shared page between host and NIC. 957 */ 958 static int 959 wpi_alloc_shared(struct wpi_softc *sc) 960 { 961 /* Shared buffer must be aligned on a 4KB boundary. */ 962 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 963 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 964 } 965 966 static void 967 wpi_free_shared(struct wpi_softc *sc) 968 { 969 wpi_dma_contig_free(&sc->shared_dma); 970 } 971 972 /* 973 * Allocate DMA-safe memory for firmware transfer. 974 */ 975 static int 976 wpi_alloc_fwmem(struct wpi_softc *sc) 977 { 978 /* Must be aligned on a 16-byte boundary. */ 979 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 980 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 981 } 982 983 static void 984 wpi_free_fwmem(struct wpi_softc *sc) 985 { 986 wpi_dma_contig_free(&sc->fw_dma); 987 } 988 989 static int 990 wpi_alloc_rx_ring(struct wpi_softc *sc) 991 { 992 struct wpi_rx_ring *ring = &sc->rxq; 993 bus_size_t size; 994 int i, error; 995 996 ring->cur = 0; 997 ring->update = 0; 998 999 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1000 1001 /* Allocate RX descriptors (16KB aligned.) */ 1002 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1003 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1004 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1005 if (error != 0) { 1006 device_printf(sc->sc_dev, 1007 "%s: could not allocate RX ring DMA memory, error %d\n", 1008 __func__, error); 1009 goto fail; 1010 } 1011 1012 /* Create RX buffer DMA tag. */ 1013 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1014 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1015 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1016 &ring->data_dmat); 1017 if (error != 0) { 1018 device_printf(sc->sc_dev, 1019 "%s: could not create RX buf DMA tag, error %d\n", 1020 __func__, error); 1021 goto fail; 1022 } 1023 1024 /* 1025 * Allocate and map RX buffers. 
1026 */ 1027 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1028 struct wpi_rx_data *data = &ring->data[i]; 1029 bus_addr_t paddr; 1030 1031 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1032 if (error != 0) { 1033 device_printf(sc->sc_dev, 1034 "%s: could not create RX buf DMA map, error %d\n", 1035 __func__, error); 1036 goto fail; 1037 } 1038 1039 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1040 if (data->m == NULL) { 1041 device_printf(sc->sc_dev, 1042 "%s: could not allocate RX mbuf\n", __func__); 1043 error = ENOBUFS; 1044 goto fail; 1045 } 1046 1047 error = bus_dmamap_load(ring->data_dmat, data->map, 1048 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1049 &paddr, BUS_DMA_NOWAIT); 1050 if (error != 0 && error != EFBIG) { 1051 device_printf(sc->sc_dev, 1052 "%s: can't map mbuf (error %d)\n", __func__, 1053 error); 1054 goto fail; 1055 } 1056 1057 /* Set physical address of RX buffer. */ 1058 ring->desc[i] = htole32(paddr); 1059 } 1060 1061 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1062 BUS_DMASYNC_PREWRITE); 1063 1064 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1065 1066 return 0; 1067 1068 fail: wpi_free_rx_ring(sc); 1069 1070 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1071 1072 return error; 1073 } 1074 1075 static void 1076 wpi_update_rx_ring(struct wpi_softc *sc) 1077 { 1078 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1079 } 1080 1081 static void 1082 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1083 { 1084 struct wpi_rx_ring *ring = &sc->rxq; 1085 1086 if (ring->update != 0) { 1087 /* Wait for INT_WAKEUP event. */ 1088 return; 1089 } 1090 1091 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1092 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1093 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1094 __func__); 1095 ring->update = 1; 1096 } else { 1097 wpi_update_rx_ring(sc); 1098 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1099 } 1100 } 1101 1102 static void 1103 wpi_reset_rx_ring(struct wpi_softc *sc) 1104 { 1105 struct wpi_rx_ring *ring = &sc->rxq; 1106 int ntries; 1107 1108 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1109 1110 if (wpi_nic_lock(sc) == 0) { 1111 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1112 for (ntries = 0; ntries < 1000; ntries++) { 1113 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1114 WPI_FH_RX_STATUS_IDLE) 1115 break; 1116 DELAY(10); 1117 } 1118 wpi_nic_unlock(sc); 1119 } 1120 1121 ring->cur = 0; 1122 ring->update = 0; 1123 } 1124 1125 static void 1126 wpi_free_rx_ring(struct wpi_softc *sc) 1127 { 1128 struct wpi_rx_ring *ring = &sc->rxq; 1129 int i; 1130 1131 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1132 1133 wpi_dma_contig_free(&ring->desc_dma); 1134 1135 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1136 struct wpi_rx_data *data = &ring->data[i]; 1137 1138 if (data->m != NULL) { 1139 bus_dmamap_sync(ring->data_dmat, data->map, 1140 BUS_DMASYNC_POSTREAD); 1141 bus_dmamap_unload(ring->data_dmat, data->map); 1142 m_freem(data->m); 1143 data->m = NULL; 1144 } 1145 if (data->map != NULL) 1146 bus_dmamap_destroy(ring->data_dmat, data->map); 1147 } 1148 if (ring->data_dmat != NULL) { 1149 bus_dma_tag_destroy(ring->data_dmat); 1150 ring->data_dmat = NULL; 1151 } 1152 } 1153 1154 static int 1155 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1156 { 1157 bus_addr_t paddr; 1158 bus_size_t size; 1159 int i, error; 1160 1161 ring->qid = qid; 1162 ring->queued = 0; 1163 ring->cur = 0; 1164 ring->update = 0; 
1165 1166 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1167 1168 /* Allocate TX descriptors (16KB aligned.) */ 1169 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1170 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1171 size, WPI_RING_DMA_ALIGN); 1172 if (error != 0) { 1173 device_printf(sc->sc_dev, 1174 "%s: could not allocate TX ring DMA memory, error %d\n", 1175 __func__, error); 1176 goto fail; 1177 } 1178 1179 /* Update shared area with ring physical address. */ 1180 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1181 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1182 BUS_DMASYNC_PREWRITE); 1183 1184 /* 1185 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1186 * to allocate commands space for other rings. 1187 * XXX Do we really need to allocate descriptors for other rings? 1188 */ 1189 if (qid > WPI_CMD_QUEUE_NUM) { 1190 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1191 return 0; 1192 } 1193 1194 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1195 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1196 size, 4); 1197 if (error != 0) { 1198 device_printf(sc->sc_dev, 1199 "%s: could not allocate TX cmd DMA memory, error %d\n", 1200 __func__, error); 1201 goto fail; 1202 } 1203 1204 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1205 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1206 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1207 &ring->data_dmat); 1208 if (error != 0) { 1209 device_printf(sc->sc_dev, 1210 "%s: could not create TX buf DMA tag, error %d\n", 1211 __func__, error); 1212 goto fail; 1213 } 1214 1215 paddr = ring->cmd_dma.paddr; 1216 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1217 struct wpi_tx_data *data = &ring->data[i]; 1218 1219 data->cmd_paddr = paddr; 1220 paddr += sizeof (struct wpi_tx_cmd); 1221 1222 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1223 if (error != 0) { 1224 device_printf(sc->sc_dev, 1225 "%s: could not create TX buf DMA map, error %d\n", 1226 __func__, error); 1227 goto fail; 1228 } 1229 } 1230 1231 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1232 1233 return 0; 1234 1235 fail: wpi_free_tx_ring(sc, ring); 1236 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1237 return error; 1238 } 1239 1240 static void 1241 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1242 { 1243 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1244 } 1245 1246 static void 1247 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1248 { 1249 1250 if (ring->update != 0) { 1251 /* Wait for INT_WAKEUP event. 
*/ 1252 return; 1253 } 1254 1255 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1256 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1257 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1258 __func__, ring->qid); 1259 ring->update = 1; 1260 } else { 1261 wpi_update_tx_ring(sc, ring); 1262 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1263 } 1264 } 1265 1266 static void 1267 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1268 { 1269 int i; 1270 1271 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1272 1273 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1274 struct wpi_tx_data *data = &ring->data[i]; 1275 1276 if (data->m != NULL) { 1277 bus_dmamap_sync(ring->data_dmat, data->map, 1278 BUS_DMASYNC_POSTWRITE); 1279 bus_dmamap_unload(ring->data_dmat, data->map); 1280 m_freem(data->m); 1281 data->m = NULL; 1282 } 1283 if (data->ni != NULL) { 1284 ieee80211_free_node(data->ni); 1285 data->ni = NULL; 1286 } 1287 } 1288 /* Clear TX descriptors. */ 1289 memset(ring->desc, 0, ring->desc_dma.size); 1290 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1291 BUS_DMASYNC_PREWRITE); 1292 ring->queued = 0; 1293 ring->cur = 0; 1294 ring->update = 0; 1295 } 1296 1297 static void 1298 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1299 { 1300 int i; 1301 1302 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1303 1304 wpi_dma_contig_free(&ring->desc_dma); 1305 wpi_dma_contig_free(&ring->cmd_dma); 1306 1307 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1308 struct wpi_tx_data *data = &ring->data[i]; 1309 1310 if (data->m != NULL) { 1311 bus_dmamap_sync(ring->data_dmat, data->map, 1312 BUS_DMASYNC_POSTWRITE); 1313 bus_dmamap_unload(ring->data_dmat, data->map); 1314 m_freem(data->m); 1315 } 1316 if (data->map != NULL) 1317 bus_dmamap_destroy(ring->data_dmat, data->map); 1318 } 1319 if (ring->data_dmat != NULL) { 1320 bus_dma_tag_destroy(ring->data_dmat); 1321 ring->data_dmat = NULL; 1322 } 1323 } 1324 1325 /* 1326 * Extract various information from EEPROM. 1327 */ 1328 static int 1329 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1330 { 1331 #define WPI_CHK(res) do { \ 1332 if ((error = res) != 0) \ 1333 goto fail; \ 1334 } while (0) 1335 int error, i; 1336 1337 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1338 1339 /* Adapter has to be powered on for EEPROM access to work. */ 1340 if ((error = wpi_apm_init(sc)) != 0) { 1341 device_printf(sc->sc_dev, 1342 "%s: could not power ON adapter, error %d\n", __func__, 1343 error); 1344 return error; 1345 } 1346 1347 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1348 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1349 error = EIO; 1350 goto fail; 1351 } 1352 /* Clear HW ownership of EEPROM. */ 1353 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1354 1355 /* Read the hardware capabilities, revision and SKU type. */ 1356 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1357 sizeof(sc->cap))); 1358 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1359 sizeof(sc->rev))); 1360 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1361 sizeof(sc->type))); 1362 1363 sc->rev = le16toh(sc->rev); 1364 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1365 sc->rev, sc->type); 1366 1367 /* Read the regulatory domain (4 ASCII characters.) */ 1368 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1369 sizeof(sc->domain))); 1370 1371 /* Read MAC address. 
*/ 1372 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1373 IEEE80211_ADDR_LEN)); 1374 1375 /* Read the list of authorized channels. */ 1376 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1377 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1378 1379 /* Read the list of TX power groups. */ 1380 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1381 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1382 1383 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1384 1385 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1386 __func__); 1387 1388 return error; 1389 #undef WPI_CHK 1390 } 1391 1392 /* 1393 * Translate EEPROM flags to net80211. 1394 */ 1395 static uint32_t 1396 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1397 { 1398 uint32_t nflags; 1399 1400 nflags = 0; 1401 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1402 nflags |= IEEE80211_CHAN_PASSIVE; 1403 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1404 nflags |= IEEE80211_CHAN_NOADHOC; 1405 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1406 nflags |= IEEE80211_CHAN_DFS; 1407 /* XXX apparently IBSS may still be marked */ 1408 nflags |= IEEE80211_CHAN_NOADHOC; 1409 } 1410 1411 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1412 if (nflags & IEEE80211_CHAN_NOADHOC) 1413 nflags |= IEEE80211_CHAN_NOHOSTAP; 1414 1415 return nflags; 1416 } 1417 1418 static void 1419 wpi_read_eeprom_band(struct wpi_softc *sc, int n) 1420 { 1421 struct ieee80211com *ic = &sc->sc_ic; 1422 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1423 const struct wpi_chan_band *band = &wpi_bands[n]; 1424 struct ieee80211_channel *c; 1425 uint8_t chan; 1426 int i, nflags; 1427 1428 for (i = 0; i < band->nchan; i++) { 1429 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1430 DPRINTF(sc, WPI_DEBUG_EEPROM, 1431 "Channel Not Valid: %d, band %d\n", 1432 band->chan[i],n); 1433 continue; 1434 } 1435 1436 chan = band->chan[i]; 1437 nflags = wpi_eeprom_channel_flags(&channels[i]); 1438 1439 c = &ic->ic_channels[ic->ic_nchans++]; 1440 c->ic_ieee = chan; 1441 c->ic_maxregpower = channels[i].maxpwr; 1442 c->ic_maxpower = 2*c->ic_maxregpower; 1443 1444 if (n == 0) { /* 2GHz band */ 1445 c->ic_freq = ieee80211_ieee2mhz(chan, 1446 IEEE80211_CHAN_G); 1447 1448 /* G =>'s B is supported */ 1449 c->ic_flags = IEEE80211_CHAN_B | nflags; 1450 c = &ic->ic_channels[ic->ic_nchans++]; 1451 c[0] = c[-1]; 1452 c->ic_flags = IEEE80211_CHAN_G | nflags; 1453 } else { /* 5GHz band */ 1454 c->ic_freq = ieee80211_ieee2mhz(chan, 1455 IEEE80211_CHAN_A); 1456 1457 c->ic_flags = IEEE80211_CHAN_A | nflags; 1458 } 1459 1460 /* Save maximum allowed TX power for this channel. */ 1461 sc->maxpwr[chan] = channels[i].maxpwr; 1462 1463 DPRINTF(sc, WPI_DEBUG_EEPROM, 1464 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1465 " offset %d\n", chan, c->ic_freq, 1466 channels[i].flags, sc->maxpwr[chan], 1467 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1468 } 1469 } 1470 1471 /** 1472 * Read the eeprom to find out what channels are valid for the given 1473 * band and update net80211 with what we find. 
1474 */ 1475 static int 1476 wpi_read_eeprom_channels(struct wpi_softc *sc, int n) 1477 { 1478 struct ieee80211com *ic = &sc->sc_ic; 1479 const struct wpi_chan_band *band = &wpi_bands[n]; 1480 int error; 1481 1482 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1483 1484 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1485 band->nchan * sizeof (struct wpi_eeprom_chan)); 1486 if (error != 0) { 1487 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1488 return error; 1489 } 1490 1491 wpi_read_eeprom_band(sc, n); 1492 1493 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1494 1495 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1496 1497 return 0; 1498 } 1499 1500 static struct wpi_eeprom_chan * 1501 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1502 { 1503 int i, j; 1504 1505 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1506 for (i = 0; i < wpi_bands[j].nchan; i++) 1507 if (wpi_bands[j].chan[i] == c->ic_ieee) 1508 return &sc->eeprom_channels[j][i]; 1509 1510 return NULL; 1511 } 1512 1513 /* 1514 * Enforce flags read from EEPROM. 1515 */ 1516 static int 1517 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1518 int nchan, struct ieee80211_channel chans[]) 1519 { 1520 struct wpi_softc *sc = ic->ic_softc; 1521 int i; 1522 1523 for (i = 0; i < nchan; i++) { 1524 struct ieee80211_channel *c = &chans[i]; 1525 struct wpi_eeprom_chan *channel; 1526 1527 channel = wpi_find_eeprom_channel(sc, c); 1528 if (channel == NULL) { 1529 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1530 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1531 return EINVAL; 1532 } 1533 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1534 } 1535 1536 return 0; 1537 } 1538 1539 static int 1540 wpi_read_eeprom_group(struct wpi_softc *sc, int n) 1541 { 1542 struct wpi_power_group *group = &sc->groups[n]; 1543 struct wpi_eeprom_group rgroup; 1544 int i, error; 1545 1546 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1547 1548 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1549 &rgroup, sizeof rgroup)) != 0) { 1550 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1551 return error; 1552 } 1553 1554 /* Save TX power group information. */ 1555 group->chan = rgroup.chan; 1556 group->maxpwr = rgroup.maxpwr; 1557 /* Retrieve temperature at which the samples were taken. 
*/ 1558 group->temp = (int16_t)le16toh(rgroup.temp); 1559 1560 DPRINTF(sc, WPI_DEBUG_EEPROM, 1561 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1562 group->maxpwr, group->temp); 1563 1564 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1565 group->samples[i].index = rgroup.samples[i].index; 1566 group->samples[i].power = rgroup.samples[i].power; 1567 1568 DPRINTF(sc, WPI_DEBUG_EEPROM, 1569 "\tsample %d: index=%d power=%d\n", i, 1570 group->samples[i].index, group->samples[i].power); 1571 } 1572 1573 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1574 1575 return 0; 1576 } 1577 1578 static int 1579 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1580 { 1581 int newid = WPI_ID_IBSS_MIN; 1582 1583 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1584 if ((sc->nodesmsk & (1 << newid)) == 0) { 1585 sc->nodesmsk |= 1 << newid; 1586 return newid; 1587 } 1588 } 1589 1590 return WPI_ID_UNDEFINED; 1591 } 1592 1593 static __inline int 1594 wpi_add_node_entry_sta(struct wpi_softc *sc) 1595 { 1596 sc->nodesmsk |= 1 << WPI_ID_BSS; 1597 1598 return WPI_ID_BSS; 1599 } 1600 1601 static __inline int 1602 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1603 { 1604 if (id == WPI_ID_UNDEFINED) 1605 return 0; 1606 1607 return (sc->nodesmsk >> id) & 1; 1608 } 1609 1610 static __inline void 1611 wpi_clear_node_table(struct wpi_softc *sc) 1612 { 1613 sc->nodesmsk = 0; 1614 } 1615 1616 static __inline void 1617 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1618 { 1619 sc->nodesmsk &= ~(1 << id); 1620 } 1621 1622 static struct ieee80211_node * 1623 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1624 { 1625 struct wpi_node *wn; 1626 1627 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1628 M_NOWAIT | M_ZERO); 1629 1630 if (wn == NULL) 1631 return NULL; 1632 1633 wn->id = WPI_ID_UNDEFINED; 1634 1635 return &wn->ni; 1636 } 1637 1638 static void 1639 wpi_node_free(struct ieee80211_node *ni) 1640 { 1641 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1642 struct wpi_node *wn = WPI_NODE(ni); 1643 1644 if (wn->id != WPI_ID_UNDEFINED) { 1645 WPI_NT_LOCK(sc); 1646 if (wpi_check_node_entry(sc, wn->id)) { 1647 wpi_del_node_entry(sc, wn->id); 1648 wpi_del_node(sc, ni); 1649 } 1650 WPI_NT_UNLOCK(sc); 1651 } 1652 1653 sc->sc_node_free(ni); 1654 } 1655 1656 static __inline int 1657 wpi_check_bss_filter(struct wpi_softc *sc) 1658 { 1659 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1660 } 1661 1662 static void 1663 wpi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1664 const struct ieee80211_rx_stats *rxs, 1665 int rssi, int nf) 1666 { 1667 struct ieee80211vap *vap = ni->ni_vap; 1668 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1669 struct wpi_vap *wvp = WPI_VAP(vap); 1670 uint64_t ni_tstamp, rx_tstamp; 1671 1672 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1673 1674 if (vap->iv_opmode == IEEE80211_M_IBSS && 1675 vap->iv_state == IEEE80211_S_RUN && 1676 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1677 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1678 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1679 rx_tstamp = le64toh(sc->rx_tstamp); 1680 1681 if (ni_tstamp >= rx_tstamp) { 1682 DPRINTF(sc, WPI_DEBUG_STATE, 1683 "ibss merge, tsf %ju tstamp %ju\n", 1684 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1685 (void) ieee80211_ibss_merge(ni); 1686 } 1687 } 1688 } 1689 1690 static void 1691 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1692 { 1693 struct wpi_softc *sc = arg; 1694 struct wpi_node *wn = WPI_NODE(ni); 1695 int error; 1696 1697 
WPI_NT_LOCK(sc); 1698 if (wn->id != WPI_ID_UNDEFINED) { 1699 wn->id = WPI_ID_UNDEFINED; 1700 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1701 device_printf(sc->sc_dev, 1702 "%s: could not add IBSS node, error %d\n", 1703 __func__, error); 1704 } 1705 } 1706 WPI_NT_UNLOCK(sc); 1707 } 1708 1709 static void 1710 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1711 { 1712 struct ieee80211com *ic = &sc->sc_ic; 1713 1714 /* Set group keys once. */ 1715 WPI_NT_LOCK(sc); 1716 wvp->wv_gtk = 0; 1717 WPI_NT_UNLOCK(sc); 1718 1719 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1720 ieee80211_crypto_reload_keys(ic); 1721 } 1722 1723 /** 1724 * Called by net80211 when ever there is a change to 80211 state machine 1725 */ 1726 static int 1727 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1728 { 1729 struct wpi_vap *wvp = WPI_VAP(vap); 1730 struct ieee80211com *ic = vap->iv_ic; 1731 struct wpi_softc *sc = ic->ic_softc; 1732 int error = 0; 1733 1734 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1735 1736 WPI_TXQ_LOCK(sc); 1737 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1738 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1739 WPI_TXQ_UNLOCK(sc); 1740 1741 return ENXIO; 1742 } 1743 WPI_TXQ_UNLOCK(sc); 1744 1745 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1746 ieee80211_state_name[vap->iv_state], 1747 ieee80211_state_name[nstate]); 1748 1749 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1750 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1751 device_printf(sc->sc_dev, 1752 "%s: could not set power saving level\n", 1753 __func__); 1754 return error; 1755 } 1756 1757 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1758 } 1759 1760 switch (nstate) { 1761 case IEEE80211_S_SCAN: 1762 WPI_RXON_LOCK(sc); 1763 if (wpi_check_bss_filter(sc) != 0) { 1764 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1765 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1766 device_printf(sc->sc_dev, 1767 "%s: could not send RXON\n", __func__); 1768 } 1769 } 1770 WPI_RXON_UNLOCK(sc); 1771 break; 1772 1773 case IEEE80211_S_ASSOC: 1774 if (vap->iv_state != IEEE80211_S_RUN) 1775 break; 1776 /* FALLTHROUGH */ 1777 case IEEE80211_S_AUTH: 1778 /* 1779 * NB: do not optimize AUTH -> AUTH state transmission - 1780 * this will break powersave with non-QoS AP! 1781 */ 1782 1783 /* 1784 * The node must be registered in the firmware before auth. 1785 * Also the associd must be cleared on RUN -> ASSOC 1786 * transitions. 1787 */ 1788 if ((error = wpi_auth(sc, vap)) != 0) { 1789 device_printf(sc->sc_dev, 1790 "%s: could not move to AUTH state, error %d\n", 1791 __func__, error); 1792 } 1793 break; 1794 1795 case IEEE80211_S_RUN: 1796 /* 1797 * RUN -> RUN transition: 1798 * STA mode: Just restart the timers. 1799 * IBSS mode: Process IBSS merge. 1800 */ 1801 if (vap->iv_state == IEEE80211_S_RUN) { 1802 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1803 WPI_RXON_LOCK(sc); 1804 wpi_calib_timeout(sc); 1805 WPI_RXON_UNLOCK(sc); 1806 break; 1807 } else { 1808 /* 1809 * Drop the BSS_FILTER bit 1810 * (there is no another way to change bssid). 1811 */ 1812 WPI_RXON_LOCK(sc); 1813 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1814 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1815 device_printf(sc->sc_dev, 1816 "%s: could not send RXON\n", 1817 __func__); 1818 } 1819 WPI_RXON_UNLOCK(sc); 1820 1821 /* Restore all what was lost. */ 1822 wpi_restore_node_table(sc, wvp); 1823 1824 /* XXX set conditionally? 
*/ 1825 wpi_updateedca(ic); 1826 } 1827 } 1828 1829 /* 1830 * !RUN -> RUN requires setting the association id 1831 * which is done with a firmware cmd. We also defer 1832 * starting the timers until that work is done. 1833 */ 1834 if ((error = wpi_run(sc, vap)) != 0) { 1835 device_printf(sc->sc_dev, 1836 "%s: could not move to RUN state\n", __func__); 1837 } 1838 break; 1839 1840 default: 1841 break; 1842 } 1843 if (error != 0) { 1844 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1845 return error; 1846 } 1847 1848 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1849 1850 return wvp->wv_newstate(vap, nstate, arg); 1851 } 1852 1853 static void 1854 wpi_calib_timeout(void *arg) 1855 { 1856 struct wpi_softc *sc = arg; 1857 1858 if (wpi_check_bss_filter(sc) == 0) 1859 return; 1860 1861 wpi_power_calibration(sc); 1862 1863 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1864 } 1865 1866 static __inline uint8_t 1867 rate2plcp(const uint8_t rate) 1868 { 1869 switch (rate) { 1870 case 12: return 0xd; 1871 case 18: return 0xf; 1872 case 24: return 0x5; 1873 case 36: return 0x7; 1874 case 48: return 0x9; 1875 case 72: return 0xb; 1876 case 96: return 0x1; 1877 case 108: return 0x3; 1878 case 2: return 10; 1879 case 4: return 20; 1880 case 11: return 55; 1881 case 22: return 110; 1882 default: return 0; 1883 } 1884 } 1885 1886 static __inline uint8_t 1887 plcp2rate(const uint8_t plcp) 1888 { 1889 switch (plcp) { 1890 case 0xd: return 12; 1891 case 0xf: return 18; 1892 case 0x5: return 24; 1893 case 0x7: return 36; 1894 case 0x9: return 48; 1895 case 0xb: return 72; 1896 case 0x1: return 96; 1897 case 0x3: return 108; 1898 case 10: return 2; 1899 case 20: return 4; 1900 case 55: return 11; 1901 case 110: return 22; 1902 default: return 0; 1903 } 1904 } 1905 1906 /* Quickly determine if a given rate is CCK or OFDM. */ 1907 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1908 1909 static void 1910 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1911 struct wpi_rx_data *data) 1912 { 1913 struct ieee80211com *ic = &sc->sc_ic; 1914 struct wpi_rx_ring *ring = &sc->rxq; 1915 struct wpi_rx_stat *stat; 1916 struct wpi_rx_head *head; 1917 struct wpi_rx_tail *tail; 1918 struct ieee80211_frame *wh; 1919 struct ieee80211_node *ni; 1920 struct mbuf *m, *m1; 1921 bus_addr_t paddr; 1922 uint32_t flags; 1923 uint16_t len; 1924 int error; 1925 1926 stat = (struct wpi_rx_stat *)(desc + 1); 1927 1928 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1929 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1930 goto fail1; 1931 } 1932 1933 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1934 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1935 len = le16toh(head->len); 1936 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1937 flags = le32toh(tail->flags); 1938 1939 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1940 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1941 le32toh(desc->len), len, (int8_t)stat->rssi, 1942 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1943 1944 /* Discard frames with a bad FCS early. */ 1945 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1946 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1947 __func__, flags); 1948 goto fail1; 1949 } 1950 /* Discard frames that are too short. 
*/ 1951 if (len < sizeof (struct ieee80211_frame_ack)) { 1952 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1953 __func__, len); 1954 goto fail1; 1955 } 1956 1957 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1958 if (__predict_false(m1 == NULL)) { 1959 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1960 __func__); 1961 goto fail1; 1962 } 1963 bus_dmamap_unload(ring->data_dmat, data->map); 1964 1965 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1966 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1967 if (__predict_false(error != 0 && error != EFBIG)) { 1968 device_printf(sc->sc_dev, 1969 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1970 m_freem(m1); 1971 1972 /* Try to reload the old mbuf. */ 1973 error = bus_dmamap_load(ring->data_dmat, data->map, 1974 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1975 &paddr, BUS_DMA_NOWAIT); 1976 if (error != 0 && error != EFBIG) { 1977 panic("%s: could not load old RX mbuf", __func__); 1978 } 1979 /* Physical address may have changed. */ 1980 ring->desc[ring->cur] = htole32(paddr); 1981 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1982 BUS_DMASYNC_PREWRITE); 1983 goto fail1; 1984 } 1985 1986 m = data->m; 1987 data->m = m1; 1988 /* Update RX descriptor. */ 1989 ring->desc[ring->cur] = htole32(paddr); 1990 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1991 BUS_DMASYNC_PREWRITE); 1992 1993 /* Finalize mbuf. */ 1994 m->m_data = (caddr_t)(head + 1); 1995 m->m_pkthdr.len = m->m_len = len; 1996 1997 /* Grab a reference to the source node. */ 1998 wh = mtod(m, struct ieee80211_frame *); 1999 2000 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2001 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2002 /* Check whether decryption was successful or not. */ 2003 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2004 DPRINTF(sc, WPI_DEBUG_RECV, 2005 "CCMP decryption failed 0x%x\n", flags); 2006 goto fail2; 2007 } 2008 m->m_flags |= M_WEP; 2009 } 2010 2011 if (len >= sizeof(struct ieee80211_frame_min)) 2012 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2013 else 2014 ni = NULL; 2015 2016 sc->rx_tstamp = tail->tstamp; 2017 2018 if (ieee80211_radiotap_active(ic)) { 2019 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2020 2021 tap->wr_flags = 0; 2022 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2023 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2024 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2025 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2026 tap->wr_tsft = tail->tstamp; 2027 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2028 tap->wr_rate = plcp2rate(head->plcp); 2029 } 2030 2031 WPI_UNLOCK(sc); 2032 2033 /* Send the frame to the 802.11 layer. */ 2034 if (ni != NULL) { 2035 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2036 /* Node is no longer needed. 
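 * Release the reference obtained from ieee80211_find_rxnode() above.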
*/ 2037 ieee80211_free_node(ni); 2038 } else 2039 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2040 2041 WPI_LOCK(sc); 2042 2043 return; 2044 2045 fail2: m_freem(m); 2046 2047 fail1: counter_u64_add(ic->ic_ierrors, 1); 2048 } 2049 2050 static void 2051 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2052 struct wpi_rx_data *data) 2053 { 2054 /* Ignore */ 2055 } 2056 2057 static void 2058 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2059 { 2060 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2061 struct wpi_tx_data *data = &ring->data[desc->idx]; 2062 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2063 struct mbuf *m; 2064 struct ieee80211_node *ni; 2065 struct ieee80211vap *vap; 2066 struct ieee80211com *ic; 2067 uint32_t status = le32toh(stat->status); 2068 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2069 2070 KASSERT(data->ni != NULL, ("no node")); 2071 KASSERT(data->m != NULL, ("no mbuf")); 2072 2073 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2074 2075 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2076 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2077 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2078 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2079 2080 /* Unmap and free mbuf. */ 2081 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2082 bus_dmamap_unload(ring->data_dmat, data->map); 2083 m = data->m, data->m = NULL; 2084 ni = data->ni, data->ni = NULL; 2085 vap = ni->ni_vap; 2086 ic = vap->iv_ic; 2087 2088 /* 2089 * Update rate control statistics for the node. 2090 */ 2091 if (status & WPI_TX_STATUS_FAIL) { 2092 ieee80211_ratectl_tx_complete(vap, ni, 2093 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2094 } else 2095 ieee80211_ratectl_tx_complete(vap, ni, 2096 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2097 2098 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2099 2100 WPI_TXQ_STATE_LOCK(sc); 2101 if (--ring->queued > 0) 2102 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2103 else 2104 callout_stop(&sc->tx_timeout); 2105 WPI_TXQ_STATE_UNLOCK(sc); 2106 2107 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2108 } 2109 2110 /* 2111 * Process a "command done" firmware notification. This is where we wakeup 2112 * processes waiting for a synchronous command completion. 2113 */ 2114 static void 2115 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2116 { 2117 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2118 struct wpi_tx_data *data; 2119 struct wpi_tx_cmd *cmd; 2120 2121 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2122 "type %s len %d\n", desc->qid, desc->idx, 2123 desc->flags, wpi_cmd_str(desc->type), 2124 le32toh(desc->len)); 2125 2126 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2127 return; /* Not a command ack. */ 2128 2129 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2130 2131 data = &ring->data[desc->idx]; 2132 cmd = &ring->cmd[desc->idx]; 2133 2134 /* If the command was mapped in an mbuf, free it. 
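 * Commands too large for the pre-allocated command slot are sent from a
 * separate mbuf (see wpi_cmd()); that mbuf is released here.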
*/ 2135 if (data->m != NULL) { 2136 bus_dmamap_sync(ring->data_dmat, data->map, 2137 BUS_DMASYNC_POSTWRITE); 2138 bus_dmamap_unload(ring->data_dmat, data->map); 2139 m_freem(data->m); 2140 data->m = NULL; 2141 } 2142 2143 wakeup(cmd); 2144 2145 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2146 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2147 2148 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2149 BUS_DMASYNC_POSTREAD); 2150 2151 WPI_TXQ_LOCK(sc); 2152 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2153 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2154 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2155 } else { 2156 sc->sc_update_rx_ring = wpi_update_rx_ring; 2157 sc->sc_update_tx_ring = wpi_update_tx_ring; 2158 } 2159 WPI_TXQ_UNLOCK(sc); 2160 } 2161 } 2162 2163 static void 2164 wpi_notif_intr(struct wpi_softc *sc) 2165 { 2166 struct ieee80211com *ic = &sc->sc_ic; 2167 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2168 uint32_t hw; 2169 2170 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2171 BUS_DMASYNC_POSTREAD); 2172 2173 hw = le32toh(sc->shared->next) & 0xfff; 2174 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2175 2176 while (sc->rxq.cur != hw) { 2177 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2178 2179 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2180 struct wpi_rx_desc *desc; 2181 2182 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2183 BUS_DMASYNC_POSTREAD); 2184 desc = mtod(data->m, struct wpi_rx_desc *); 2185 2186 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2187 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2188 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2189 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2190 2191 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2192 /* Reply to a command. */ 2193 wpi_cmd_done(sc, desc); 2194 } 2195 2196 switch (desc->type) { 2197 case WPI_RX_DONE: 2198 /* An 802.11 frame has been received. */ 2199 wpi_rx_done(sc, desc, data); 2200 2201 if (__predict_false(sc->sc_running == 0)) { 2202 /* wpi_stop() was called. */ 2203 return; 2204 } 2205 2206 break; 2207 2208 case WPI_TX_DONE: 2209 /* An 802.11 frame has been transmitted. 
*/ 2210 wpi_tx_done(sc, desc); 2211 break; 2212 2213 case WPI_RX_STATISTICS: 2214 case WPI_BEACON_STATISTICS: 2215 wpi_rx_statistics(sc, desc, data); 2216 break; 2217 2218 case WPI_BEACON_MISSED: 2219 { 2220 struct wpi_beacon_missed *miss = 2221 (struct wpi_beacon_missed *)(desc + 1); 2222 uint32_t expected, misses, received, threshold; 2223 2224 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2225 BUS_DMASYNC_POSTREAD); 2226 2227 misses = le32toh(miss->consecutive); 2228 expected = le32toh(miss->expected); 2229 received = le32toh(miss->received); 2230 threshold = MAX(2, vap->iv_bmissthreshold); 2231 2232 DPRINTF(sc, WPI_DEBUG_BMISS, 2233 "%s: beacons missed %u(%u) (received %u/%u)\n", 2234 __func__, misses, le32toh(miss->total), received, 2235 expected); 2236 2237 if (misses >= threshold || 2238 (received == 0 && expected >= threshold)) { 2239 WPI_RXON_LOCK(sc); 2240 if (callout_pending(&sc->scan_timeout)) { 2241 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2242 0, 1); 2243 } 2244 WPI_RXON_UNLOCK(sc); 2245 if (vap->iv_state == IEEE80211_S_RUN && 2246 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2247 ieee80211_beacon_miss(ic); 2248 } 2249 2250 break; 2251 } 2252 #ifdef WPI_DEBUG 2253 case WPI_BEACON_SENT: 2254 { 2255 struct wpi_tx_stat *stat = 2256 (struct wpi_tx_stat *)(desc + 1); 2257 uint64_t *tsf = (uint64_t *)(stat + 1); 2258 uint32_t *mode = (uint32_t *)(tsf + 1); 2259 2260 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2261 BUS_DMASYNC_POSTREAD); 2262 2263 DPRINTF(sc, WPI_DEBUG_BEACON, 2264 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2265 "duration %u, status %x, tsf %ju, mode %x\n", 2266 stat->rtsfailcnt, stat->ackfailcnt, 2267 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2268 le32toh(stat->status), le64toh(*tsf), 2269 le32toh(*mode)); 2270 2271 break; 2272 } 2273 #endif 2274 case WPI_UC_READY: 2275 { 2276 struct wpi_ucode_info *uc = 2277 (struct wpi_ucode_info *)(desc + 1); 2278 2279 /* The microcontroller is ready. */ 2280 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2281 BUS_DMASYNC_POSTREAD); 2282 DPRINTF(sc, WPI_DEBUG_RESET, 2283 "microcode alive notification version=%d.%d " 2284 "subtype=%x alive=%x\n", uc->major, uc->minor, 2285 uc->subtype, le32toh(uc->valid)); 2286 2287 if (le32toh(uc->valid) != 1) { 2288 device_printf(sc->sc_dev, 2289 "microcontroller initialization failed\n"); 2290 wpi_stop_locked(sc); 2291 return; 2292 } 2293 /* Save the address of the error log in SRAM. 
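 * wpi_fatal_intr() reads the log from this address if the firmware
 * later reports a fatal error.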
*/ 2294 sc->errptr = le32toh(uc->errptr); 2295 break; 2296 } 2297 case WPI_STATE_CHANGED: 2298 { 2299 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2300 BUS_DMASYNC_POSTREAD); 2301 2302 uint32_t *status = (uint32_t *)(desc + 1); 2303 2304 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2305 le32toh(*status)); 2306 2307 if (le32toh(*status) & 1) { 2308 WPI_NT_LOCK(sc); 2309 wpi_clear_node_table(sc); 2310 WPI_NT_UNLOCK(sc); 2311 taskqueue_enqueue(sc->sc_tq, 2312 &sc->sc_radiooff_task); 2313 return; 2314 } 2315 break; 2316 } 2317 #ifdef WPI_DEBUG 2318 case WPI_START_SCAN: 2319 { 2320 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2321 BUS_DMASYNC_POSTREAD); 2322 2323 struct wpi_start_scan *scan = 2324 (struct wpi_start_scan *)(desc + 1); 2325 DPRINTF(sc, WPI_DEBUG_SCAN, 2326 "%s: scanning channel %d status %x\n", 2327 __func__, scan->chan, le32toh(scan->status)); 2328 2329 break; 2330 } 2331 #endif 2332 case WPI_STOP_SCAN: 2333 { 2334 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2335 BUS_DMASYNC_POSTREAD); 2336 2337 struct wpi_stop_scan *scan = 2338 (struct wpi_stop_scan *)(desc + 1); 2339 2340 DPRINTF(sc, WPI_DEBUG_SCAN, 2341 "scan finished nchan=%d status=%d chan=%d\n", 2342 scan->nchan, scan->status, scan->chan); 2343 2344 WPI_RXON_LOCK(sc); 2345 callout_stop(&sc->scan_timeout); 2346 WPI_RXON_UNLOCK(sc); 2347 if (scan->status == WPI_SCAN_ABORTED) 2348 ieee80211_cancel_scan(vap); 2349 else 2350 ieee80211_scan_next(vap); 2351 break; 2352 } 2353 } 2354 2355 if (sc->rxq.cur % 8 == 0) { 2356 /* Tell the firmware what we have processed. */ 2357 sc->sc_update_rx_ring(sc); 2358 } 2359 } 2360 } 2361 2362 /* 2363 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2364 * from power-down sleep mode. 2365 */ 2366 static void 2367 wpi_wakeup_intr(struct wpi_softc *sc) 2368 { 2369 int qid; 2370 2371 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2372 "%s: ucode wakeup from power-down sleep\n", __func__); 2373 2374 /* Wakeup RX and TX rings. 
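 * Any ring index updates that were deferred (ring->update set) while
 * the microcontroller was asleep are pushed to the hardware now.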
*/ 2375 if (sc->rxq.update) { 2376 sc->rxq.update = 0; 2377 wpi_update_rx_ring(sc); 2378 } 2379 WPI_TXQ_LOCK(sc); 2380 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2381 struct wpi_tx_ring *ring = &sc->txq[qid]; 2382 2383 if (ring->update) { 2384 ring->update = 0; 2385 wpi_update_tx_ring(sc, ring); 2386 } 2387 } 2388 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2389 WPI_TXQ_UNLOCK(sc); 2390 } 2391 2392 /* 2393 * This function prints firmware registers 2394 */ 2395 #ifdef WPI_DEBUG 2396 static void 2397 wpi_debug_registers(struct wpi_softc *sc) 2398 { 2399 size_t i; 2400 static const uint32_t csr_tbl[] = { 2401 WPI_HW_IF_CONFIG, 2402 WPI_INT, 2403 WPI_INT_MASK, 2404 WPI_FH_INT, 2405 WPI_GPIO_IN, 2406 WPI_RESET, 2407 WPI_GP_CNTRL, 2408 WPI_EEPROM, 2409 WPI_EEPROM_GP, 2410 WPI_GIO, 2411 WPI_UCODE_GP1, 2412 WPI_UCODE_GP2, 2413 WPI_GIO_CHICKEN, 2414 WPI_ANA_PLL, 2415 WPI_DBG_HPET_MEM, 2416 }; 2417 static const uint32_t prph_tbl[] = { 2418 WPI_APMG_CLK_CTRL, 2419 WPI_APMG_PS, 2420 WPI_APMG_PCI_STT, 2421 WPI_APMG_RFKILL, 2422 }; 2423 2424 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2425 2426 for (i = 0; i < nitems(csr_tbl); i++) { 2427 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2428 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2429 2430 if ((i + 1) % 2 == 0) 2431 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2432 } 2433 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2434 2435 if (wpi_nic_lock(sc) == 0) { 2436 for (i = 0; i < nitems(prph_tbl); i++) { 2437 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2438 wpi_get_prph_string(prph_tbl[i]), 2439 wpi_prph_read(sc, prph_tbl[i])); 2440 2441 if ((i + 1) % 2 == 0) 2442 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2443 } 2444 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2445 wpi_nic_unlock(sc); 2446 } else { 2447 DPRINTF(sc, WPI_DEBUG_REGISTER, 2448 "Cannot access internal registers.\n"); 2449 } 2450 } 2451 #endif 2452 2453 /* 2454 * Dump the error log of the firmware when a firmware panic occurs. Although 2455 * we can't debug the firmware because it is neither open source nor free, it 2456 * can help us to identify certain classes of problems. 2457 */ 2458 static void 2459 wpi_fatal_intr(struct wpi_softc *sc) 2460 { 2461 struct wpi_fw_dump dump; 2462 uint32_t i, offset, count; 2463 2464 /* Check that the error log address is valid. */ 2465 if (sc->errptr < WPI_FW_DATA_BASE || 2466 sc->errptr + sizeof (dump) > 2467 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2468 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2469 sc->errptr); 2470 return; 2471 } 2472 if (wpi_nic_lock(sc) != 0) { 2473 printf("%s: could not read firmware error log\n", __func__); 2474 return; 2475 } 2476 /* Read number of entries in the log. */ 2477 count = wpi_mem_read(sc, sc->errptr); 2478 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2479 printf("%s: invalid count field (count = %u)\n", __func__, 2480 count); 2481 wpi_nic_unlock(sc); 2482 return; 2483 } 2484 /* Skip "count" field. */ 2485 offset = sc->errptr + sizeof (uint32_t); 2486 printf("firmware error log (count = %u):\n", count); 2487 for (i = 0; i < count; i++) { 2488 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2489 sizeof (dump) / sizeof (uint32_t)); 2490 2491 printf(" error type = \"%s\" (0x%08X)\n", 2492 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2493 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2494 dump.desc); 2495 printf(" error data = 0x%08X\n", 2496 dump.data); 2497 printf(" branch link = 0x%08X%08X\n", 2498 dump.blink[0], dump.blink[1]); 2499 printf(" interrupt link = 0x%08X%08X\n", 2500 dump.ilink[0], dump.ilink[1]); 2501 printf(" time = %u\n", dump.time); 2502 2503 offset += sizeof (dump); 2504 } 2505 wpi_nic_unlock(sc); 2506 /* Dump driver status (TX and RX rings) while we're here. */ 2507 printf("driver status:\n"); 2508 WPI_TXQ_LOCK(sc); 2509 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2510 struct wpi_tx_ring *ring = &sc->txq[i]; 2511 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2512 i, ring->qid, ring->cur, ring->queued); 2513 } 2514 WPI_TXQ_UNLOCK(sc); 2515 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2516 } 2517 2518 static void 2519 wpi_intr(void *arg) 2520 { 2521 struct wpi_softc *sc = arg; 2522 uint32_t r1, r2; 2523 2524 WPI_LOCK(sc); 2525 2526 /* Disable interrupts. */ 2527 WPI_WRITE(sc, WPI_INT_MASK, 0); 2528 2529 r1 = WPI_READ(sc, WPI_INT); 2530 2531 if (__predict_false(r1 == 0xffffffff || 2532 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2533 goto end; /* Hardware gone! */ 2534 2535 r2 = WPI_READ(sc, WPI_FH_INT); 2536 2537 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2538 r1, r2); 2539 2540 if (r1 == 0 && r2 == 0) 2541 goto done; /* Interrupt not for us. */ 2542 2543 /* Acknowledge interrupts. */ 2544 WPI_WRITE(sc, WPI_INT, r1); 2545 WPI_WRITE(sc, WPI_FH_INT, r2); 2546 2547 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2548 device_printf(sc->sc_dev, "fatal firmware error\n"); 2549 #ifdef WPI_DEBUG 2550 wpi_debug_registers(sc); 2551 #endif 2552 wpi_fatal_intr(sc); 2553 DPRINTF(sc, WPI_DEBUG_HW, 2554 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2555 "(Hardware Error)"); 2556 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2557 goto end; 2558 } 2559 2560 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2561 (r2 & WPI_FH_INT_RX)) 2562 wpi_notif_intr(sc); 2563 2564 if (r1 & WPI_INT_ALIVE) 2565 wakeup(sc); /* Firmware is alive. */ 2566 2567 if (r1 & WPI_INT_WAKEUP) 2568 wpi_wakeup_intr(sc); 2569 2570 done: 2571 /* Re-enable interrupts. */ 2572 if (__predict_true(sc->sc_running)) 2573 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2574 2575 end: WPI_UNLOCK(sc); 2576 } 2577 2578 static int 2579 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2580 { 2581 struct ieee80211_frame *wh; 2582 struct wpi_tx_cmd *cmd; 2583 struct wpi_tx_data *data; 2584 struct wpi_tx_desc *desc; 2585 struct wpi_tx_ring *ring; 2586 struct mbuf *m1; 2587 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2588 int error, i, hdrlen, nsegs, totlen, pad; 2589 2590 WPI_TXQ_LOCK(sc); 2591 2592 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2593 2594 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2595 2596 if (__predict_false(sc->sc_running == 0)) { 2597 /* wpi_stop() was called */ 2598 error = ENETDOWN; 2599 goto fail; 2600 } 2601 2602 wh = mtod(buf->m, struct ieee80211_frame *); 2603 hdrlen = ieee80211_anyhdrsize(wh); 2604 totlen = buf->m->m_pkthdr.len; 2605 2606 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2607 error = EINVAL; 2608 goto fail; 2609 } 2610 2611 if (hdrlen & 3) { 2612 /* First segment length must be a multiple of 4. */ 2613 pad = 4 - (hdrlen & 3); 2614 } else 2615 pad = 0; 2616 2617 ring = &sc->txq[buf->ac]; 2618 desc = &ring->desc[ring->cur]; 2619 data = &ring->data[ring->cur]; 2620 2621 /* Prepare TX firmware command. 
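 * The command slot carries the caller-supplied wpi_cmd_data parameters
 * plus the saved 802.11 header; the frame payload follows in separate
 * DMA segments filled in below.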
*/ 2622 cmd = &ring->cmd[ring->cur]; 2623 cmd->code = buf->code; 2624 cmd->flags = 0; 2625 cmd->qid = ring->qid; 2626 cmd->idx = ring->cur; 2627 2628 memcpy(cmd->data, buf->data, buf->size); 2629 2630 /* Save and trim IEEE802.11 header. */ 2631 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2632 m_adj(buf->m, hdrlen); 2633 2634 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2635 segs, &nsegs, BUS_DMA_NOWAIT); 2636 if (error != 0 && error != EFBIG) { 2637 device_printf(sc->sc_dev, 2638 "%s: can't map mbuf (error %d)\n", __func__, error); 2639 goto fail; 2640 } 2641 if (error != 0) { 2642 /* Too many DMA segments, linearize mbuf. */ 2643 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2644 if (m1 == NULL) { 2645 device_printf(sc->sc_dev, 2646 "%s: could not defrag mbuf\n", __func__); 2647 error = ENOBUFS; 2648 goto fail; 2649 } 2650 buf->m = m1; 2651 2652 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2653 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2654 if (__predict_false(error != 0)) { 2655 device_printf(sc->sc_dev, 2656 "%s: can't map mbuf (error %d)\n", __func__, 2657 error); 2658 goto fail; 2659 } 2660 } 2661 2662 KASSERT(nsegs < WPI_MAX_SCATTER, 2663 ("too many DMA segments, nsegs (%d) should be less than %d", 2664 nsegs, WPI_MAX_SCATTER)); 2665 2666 data->m = buf->m; 2667 data->ni = buf->ni; 2668 2669 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2670 __func__, ring->qid, ring->cur, totlen, nsegs); 2671 2672 /* Fill TX descriptor. */ 2673 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2674 /* First DMA segment is used by the TX command. */ 2675 desc->segs[0].addr = htole32(data->cmd_paddr); 2676 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2677 /* Other DMA segments are for data payload. */ 2678 seg = &segs[0]; 2679 for (i = 1; i <= nsegs; i++) { 2680 desc->segs[i].addr = htole32(seg->ds_addr); 2681 desc->segs[i].len = htole32(seg->ds_len); 2682 seg++; 2683 } 2684 2685 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2686 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2687 BUS_DMASYNC_PREWRITE); 2688 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2689 BUS_DMASYNC_PREWRITE); 2690 2691 /* Kick TX ring. */ 2692 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2693 sc->sc_update_tx_ring(sc, ring); 2694 2695 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2696 WPI_TXQ_STATE_LOCK(sc); 2697 ring->queued++; 2698 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2699 WPI_TXQ_STATE_UNLOCK(sc); 2700 } 2701 2702 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2703 2704 WPI_TXQ_UNLOCK(sc); 2705 2706 return 0; 2707 2708 fail: m_freem(buf->m); 2709 2710 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2711 2712 WPI_TXQ_UNLOCK(sc); 2713 2714 return error; 2715 } 2716 2717 /* 2718 * Construct the data packet for a transmit buffer. 
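 * This selects the TX rate, access category and security settings and
 * fills in a wpi_cmd_data structure which is then handed to wpi_cmd2().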
2719 */ 2720 static int 2721 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2722 { 2723 const struct ieee80211_txparam *tp; 2724 struct ieee80211vap *vap = ni->ni_vap; 2725 struct ieee80211com *ic = ni->ni_ic; 2726 struct wpi_node *wn = WPI_NODE(ni); 2727 struct ieee80211_channel *chan; 2728 struct ieee80211_frame *wh; 2729 struct ieee80211_key *k = NULL; 2730 struct wpi_buf tx_data; 2731 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2732 uint32_t flags; 2733 uint16_t qos; 2734 uint8_t tid, type; 2735 int ac, error, swcrypt, rate, ismcast, totlen; 2736 2737 wh = mtod(m, struct ieee80211_frame *); 2738 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2739 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2740 swcrypt = 1; 2741 2742 /* Select EDCA Access Category and TX ring for this frame. */ 2743 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2744 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2745 tid = qos & IEEE80211_QOS_TID; 2746 } else { 2747 qos = 0; 2748 tid = 0; 2749 } 2750 ac = M_WME_GETAC(m); 2751 2752 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2753 ni->ni_chan : ic->ic_curchan; 2754 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2755 2756 /* Choose a TX rate index. */ 2757 if (type == IEEE80211_FC0_TYPE_MGT) 2758 rate = tp->mgmtrate; 2759 else if (ismcast) 2760 rate = tp->mcastrate; 2761 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2762 rate = tp->ucastrate; 2763 else if (m->m_flags & M_EAPOL) 2764 rate = tp->mgmtrate; 2765 else { 2766 /* XXX pass pktlen */ 2767 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2768 rate = ni->ni_txrate; 2769 } 2770 2771 /* Encrypt the frame if need be. */ 2772 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2773 /* Retrieve key for TX. */ 2774 k = ieee80211_crypto_encap(ni, m); 2775 if (k == NULL) { 2776 error = ENOBUFS; 2777 goto fail; 2778 } 2779 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2780 2781 /* 802.11 header may have moved. */ 2782 wh = mtod(m, struct ieee80211_frame *); 2783 } 2784 totlen = m->m_pkthdr.len; 2785 2786 if (ieee80211_radiotap_active_vap(vap)) { 2787 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2788 2789 tap->wt_flags = 0; 2790 tap->wt_rate = rate; 2791 if (k != NULL) 2792 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2793 2794 ieee80211_radiotap_tx(vap, m); 2795 } 2796 2797 flags = 0; 2798 if (!ismcast) { 2799 /* Unicast frame, check if an ACK is expected. */ 2800 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2801 IEEE80211_QOS_ACKPOLICY_NOACK) 2802 flags |= WPI_TX_NEED_ACK; 2803 } 2804 2805 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2806 flags |= WPI_TX_AUTO_SEQ; 2807 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2808 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2809 2810 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2811 if (!ismcast) { 2812 /* NB: Group frames are sent using CCK in 802.11b/g. 
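 * That is why the ERP protection (USEPROT) check below only applies to
 * unicast OFDM rates.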
*/ 2813 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2814 flags |= WPI_TX_NEED_RTS; 2815 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2816 WPI_RATE_IS_OFDM(rate)) { 2817 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2818 flags |= WPI_TX_NEED_CTS; 2819 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2820 flags |= WPI_TX_NEED_RTS; 2821 } 2822 2823 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2824 flags |= WPI_TX_FULL_TXOP; 2825 } 2826 2827 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2828 if (type == IEEE80211_FC0_TYPE_MGT) { 2829 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2830 2831 /* Tell HW to set timestamp in probe responses. */ 2832 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2833 flags |= WPI_TX_INSERT_TSTAMP; 2834 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2835 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2836 tx->timeout = htole16(3); 2837 else 2838 tx->timeout = htole16(2); 2839 } 2840 2841 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2842 tx->id = WPI_ID_BROADCAST; 2843 else { 2844 if (wn->id == WPI_ID_UNDEFINED) { 2845 device_printf(sc->sc_dev, 2846 "%s: undefined node id\n", __func__); 2847 error = EINVAL; 2848 goto fail; 2849 } 2850 2851 tx->id = wn->id; 2852 } 2853 2854 if (!swcrypt) { 2855 switch (k->wk_cipher->ic_cipher) { 2856 case IEEE80211_CIPHER_AES_CCM: 2857 tx->security = WPI_CIPHER_CCMP; 2858 break; 2859 2860 default: 2861 break; 2862 } 2863 2864 memcpy(tx->key, k->wk_key, k->wk_keylen); 2865 } 2866 2867 tx->len = htole16(totlen); 2868 tx->flags = htole32(flags); 2869 tx->plcp = rate2plcp(rate); 2870 tx->tid = tid; 2871 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2872 tx->ofdm_mask = 0xff; 2873 tx->cck_mask = 0x0f; 2874 tx->rts_ntries = 7; 2875 tx->data_ntries = tp->maxretry; 2876 2877 tx_data.ni = ni; 2878 tx_data.m = m; 2879 tx_data.size = sizeof(struct wpi_cmd_data); 2880 tx_data.code = WPI_CMD_TX_DATA; 2881 tx_data.ac = ac; 2882 2883 return wpi_cmd2(sc, &tx_data); 2884 2885 fail: m_freem(m); 2886 return error; 2887 } 2888 2889 static int 2890 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2891 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2892 { 2893 struct ieee80211vap *vap = ni->ni_vap; 2894 struct ieee80211_key *k = NULL; 2895 struct ieee80211_frame *wh; 2896 struct wpi_buf tx_data; 2897 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2898 uint32_t flags; 2899 uint8_t type; 2900 int ac, rate, swcrypt, totlen; 2901 2902 wh = mtod(m, struct ieee80211_frame *); 2903 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2904 swcrypt = 1; 2905 2906 ac = params->ibp_pri & 3; 2907 2908 /* Choose a TX rate index. */ 2909 rate = params->ibp_rate0; 2910 2911 flags = 0; 2912 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2913 flags |= WPI_TX_AUTO_SEQ; 2914 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2915 flags |= WPI_TX_NEED_ACK; 2916 if (params->ibp_flags & IEEE80211_BPF_RTS) 2917 flags |= WPI_TX_NEED_RTS; 2918 if (params->ibp_flags & IEEE80211_BPF_CTS) 2919 flags |= WPI_TX_NEED_CTS; 2920 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2921 flags |= WPI_TX_FULL_TXOP; 2922 2923 /* Encrypt the frame if need be. */ 2924 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2925 /* Retrieve key for TX. */ 2926 k = ieee80211_crypto_encap(ni, m); 2927 if (k == NULL) { 2928 m_freem(m); 2929 return ENOBUFS; 2930 } 2931 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2932 2933 /* 802.11 header may have moved. 
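 * ieee80211_crypto_encap() may have inserted the IV and shifted the
 * mbuf data, so re-fetch the header pointer.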
*/ 2934 wh = mtod(m, struct ieee80211_frame *); 2935 } 2936 totlen = m->m_pkthdr.len; 2937 2938 if (ieee80211_radiotap_active_vap(vap)) { 2939 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2940 2941 tap->wt_flags = 0; 2942 tap->wt_rate = rate; 2943 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2944 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2945 2946 ieee80211_radiotap_tx(vap, m); 2947 } 2948 2949 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2950 if (type == IEEE80211_FC0_TYPE_MGT) { 2951 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2952 2953 /* Tell HW to set timestamp in probe responses. */ 2954 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2955 flags |= WPI_TX_INSERT_TSTAMP; 2956 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2957 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2958 tx->timeout = htole16(3); 2959 else 2960 tx->timeout = htole16(2); 2961 } 2962 2963 if (!swcrypt) { 2964 switch (k->wk_cipher->ic_cipher) { 2965 case IEEE80211_CIPHER_AES_CCM: 2966 tx->security = WPI_CIPHER_CCMP; 2967 break; 2968 2969 default: 2970 break; 2971 } 2972 2973 memcpy(tx->key, k->wk_key, k->wk_keylen); 2974 } 2975 2976 tx->len = htole16(totlen); 2977 tx->flags = htole32(flags); 2978 tx->plcp = rate2plcp(rate); 2979 tx->id = WPI_ID_BROADCAST; 2980 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2981 tx->rts_ntries = params->ibp_try1; 2982 tx->data_ntries = params->ibp_try0; 2983 2984 tx_data.ni = ni; 2985 tx_data.m = m; 2986 tx_data.size = sizeof(struct wpi_cmd_data); 2987 tx_data.code = WPI_CMD_TX_DATA; 2988 tx_data.ac = ac; 2989 2990 return wpi_cmd2(sc, &tx_data); 2991 } 2992 2993 static __inline int 2994 wpi_tx_ring_is_full(struct wpi_softc *sc, int ac) 2995 { 2996 struct wpi_tx_ring *ring = &sc->txq[ac]; 2997 int retval; 2998 2999 WPI_TXQ_STATE_LOCK(sc); 3000 retval = (ring->queued > WPI_TX_RING_HIMARK); 3001 WPI_TXQ_STATE_UNLOCK(sc); 3002 3003 return retval; 3004 } 3005 3006 static __inline void 3007 wpi_handle_tx_failure(struct ieee80211_node *ni) 3008 { 3009 /* NB: m is reclaimed on tx failure */ 3010 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3011 ieee80211_free_node(ni); 3012 } 3013 3014 static int 3015 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3016 const struct ieee80211_bpf_params *params) 3017 { 3018 struct ieee80211com *ic = ni->ni_ic; 3019 struct wpi_softc *sc = ic->ic_softc; 3020 int ac, error = 0; 3021 3022 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3023 3024 ac = M_WME_GETAC(m); 3025 3026 WPI_TX_LOCK(sc); 3027 3028 if (sc->sc_running == 0 || wpi_tx_ring_is_full(sc, ac)) { 3029 m_freem(m); 3030 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3031 goto unlock; 3032 } 3033 3034 if (params == NULL) { 3035 /* 3036 * Legacy path; interpret frame contents to decide 3037 * precisely how to send the frame. 3038 */ 3039 error = wpi_tx_data(sc, m, ni); 3040 } else { 3041 /* 3042 * Caller supplied explicit parameters to use in 3043 * sending the frame. 
3044 */ 3045 error = wpi_tx_data_raw(sc, m, ni, params); 3046 } 3047 3048 unlock: WPI_TX_UNLOCK(sc); 3049 3050 if (error != 0) { 3051 wpi_handle_tx_failure(ni); 3052 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3053 3054 return error; 3055 } 3056 3057 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3058 3059 return 0; 3060 } 3061 3062 static int 3063 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3064 { 3065 struct wpi_softc *sc = ic->ic_softc; 3066 struct ieee80211_node *ni; 3067 int ac, error; 3068 3069 WPI_TX_LOCK(sc); 3070 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3071 3072 /* Check if interface is up & running. */ 3073 if (__predict_false(sc->sc_running == 0)) { 3074 error = ENXIO; 3075 goto unlock; 3076 } 3077 3078 /* Check for available space. */ 3079 ac = M_WME_GETAC(m); 3080 if (wpi_tx_ring_is_full(sc, ac)) { 3081 error = ENOBUFS; 3082 goto unlock; 3083 } 3084 3085 error = 0; 3086 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3087 if (wpi_tx_data(sc, m, ni) != 0) { 3088 wpi_handle_tx_failure(ni); 3089 } 3090 3091 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3092 3093 unlock: WPI_TX_UNLOCK(sc); 3094 3095 return (error); 3096 } 3097 3098 static void 3099 wpi_watchdog_rfkill(void *arg) 3100 { 3101 struct wpi_softc *sc = arg; 3102 struct ieee80211com *ic = &sc->sc_ic; 3103 3104 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3105 3106 /* No need to lock firmware memory. */ 3107 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3108 /* Radio kill switch is still off. */ 3109 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3110 sc); 3111 } else 3112 ieee80211_runtask(ic, &sc->sc_radioon_task); 3113 } 3114 3115 static void 3116 wpi_scan_timeout(void *arg) 3117 { 3118 struct wpi_softc *sc = arg; 3119 struct ieee80211com *ic = &sc->sc_ic; 3120 3121 ic_printf(ic, "scan timeout\n"); 3122 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3123 } 3124 3125 static void 3126 wpi_tx_timeout(void *arg) 3127 { 3128 struct wpi_softc *sc = arg; 3129 struct ieee80211com *ic = &sc->sc_ic; 3130 3131 ic_printf(ic, "device timeout\n"); 3132 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3133 } 3134 3135 static void 3136 wpi_parent(struct ieee80211com *ic) 3137 { 3138 struct wpi_softc *sc = ic->ic_softc; 3139 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3140 3141 if (ic->ic_nrunning > 0) { 3142 if (wpi_init(sc) == 0) { 3143 ieee80211_notify_radio(ic, 1); 3144 ieee80211_start_all(ic); 3145 } else { 3146 ieee80211_notify_radio(ic, 0); 3147 ieee80211_stop(vap); 3148 } 3149 } else 3150 wpi_stop(sc); 3151 } 3152 3153 /* 3154 * Send a command to the firmware. 
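 * If async is zero the caller sleeps (see the mtx_sleep() at the end)
 * until wpi_cmd_done() wakes it up when the firmware acknowledges the
 * command.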
3155 */ 3156 static int 3157 wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3158 int async) 3159 { 3160 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3161 struct wpi_tx_desc *desc; 3162 struct wpi_tx_data *data; 3163 struct wpi_tx_cmd *cmd; 3164 struct mbuf *m; 3165 bus_addr_t paddr; 3166 int totlen, error; 3167 3168 WPI_TXQ_LOCK(sc); 3169 3170 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3171 3172 if (__predict_false(sc->sc_running == 0)) { 3173 /* wpi_stop() was called */ 3174 if (code == WPI_CMD_SCAN) 3175 error = ENETDOWN; 3176 else 3177 error = 0; 3178 3179 goto fail; 3180 } 3181 3182 if (async == 0) 3183 WPI_LOCK_ASSERT(sc); 3184 3185 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3186 __func__, wpi_cmd_str(code), size, async); 3187 3188 desc = &ring->desc[ring->cur]; 3189 data = &ring->data[ring->cur]; 3190 totlen = 4 + size; 3191 3192 if (size > sizeof cmd->data) { 3193 /* Command is too large to fit in a descriptor. */ 3194 if (totlen > MCLBYTES) { 3195 error = EINVAL; 3196 goto fail; 3197 } 3198 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3199 if (m == NULL) { 3200 error = ENOMEM; 3201 goto fail; 3202 } 3203 cmd = mtod(m, struct wpi_tx_cmd *); 3204 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3205 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3206 if (error != 0) { 3207 m_freem(m); 3208 goto fail; 3209 } 3210 data->m = m; 3211 } else { 3212 cmd = &ring->cmd[ring->cur]; 3213 paddr = data->cmd_paddr; 3214 } 3215 3216 cmd->code = code; 3217 cmd->flags = 0; 3218 cmd->qid = ring->qid; 3219 cmd->idx = ring->cur; 3220 memcpy(cmd->data, buf, size); 3221 3222 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3223 desc->segs[0].addr = htole32(paddr); 3224 desc->segs[0].len = htole32(totlen); 3225 3226 if (size > sizeof cmd->data) { 3227 bus_dmamap_sync(ring->data_dmat, data->map, 3228 BUS_DMASYNC_PREWRITE); 3229 } else { 3230 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3231 BUS_DMASYNC_PREWRITE); 3232 } 3233 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3234 BUS_DMASYNC_PREWRITE); 3235 3236 /* Kick command ring. */ 3237 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3238 sc->sc_update_tx_ring(sc, ring); 3239 3240 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3241 3242 WPI_TXQ_UNLOCK(sc); 3243 3244 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3245 3246 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3247 3248 WPI_TXQ_UNLOCK(sc); 3249 3250 return error; 3251 } 3252 3253 /* 3254 * Configure HW multi-rate retries. 3255 */ 3256 static int 3257 wpi_mrr_setup(struct wpi_softc *sc) 3258 { 3259 struct ieee80211com *ic = &sc->sc_ic; 3260 struct wpi_mrr_setup mrr; 3261 int i, error; 3262 3263 /* CCK rates (not used with 802.11a). */ 3264 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3265 mrr.rates[i].flags = 0; 3266 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3267 /* Fallback to the immediate lower CCK rate (if any.) */ 3268 mrr.rates[i].next = 3269 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3270 /* Try twice at this rate before falling back to "next". */ 3271 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3272 } 3273 /* OFDM rates (not used with 802.11b). */ 3274 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3275 mrr.rates[i].flags = 0; 3276 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3277 /* Fallback to the immediate lower rate (if any.) */ 3278 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. 
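 * e.g. the 11b/g fallback chain is OFDM54 -> OFDM48 -> ... -> OFDM6 ->
 * CCK2, while in 11a mode OFDM6 simply falls back to itself.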
*/ 3279 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3280 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3281 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3282 i - 1; 3283 /* Try twice at this rate before falling back to "next". */ 3284 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3285 } 3286 /* Setup MRR for control frames. */ 3287 mrr.which = htole32(WPI_MRR_CTL); 3288 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3289 if (error != 0) { 3290 device_printf(sc->sc_dev, 3291 "could not setup MRR for control frames\n"); 3292 return error; 3293 } 3294 /* Setup MRR for data frames. */ 3295 mrr.which = htole32(WPI_MRR_DATA); 3296 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3297 if (error != 0) { 3298 device_printf(sc->sc_dev, 3299 "could not setup MRR for data frames\n"); 3300 return error; 3301 } 3302 return 0; 3303 } 3304 3305 static int 3306 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3307 { 3308 struct ieee80211com *ic = ni->ni_ic; 3309 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3310 struct wpi_node *wn = WPI_NODE(ni); 3311 struct wpi_node_info node; 3312 int error; 3313 3314 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3315 3316 if (wn->id == WPI_ID_UNDEFINED) 3317 return EINVAL; 3318 3319 memset(&node, 0, sizeof node); 3320 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3321 node.id = wn->id; 3322 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3323 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3324 node.action = htole32(WPI_ACTION_SET_RATE); 3325 node.antenna = WPI_ANTENNA_BOTH; 3326 3327 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3328 wn->id, ether_sprintf(ni->ni_macaddr)); 3329 3330 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3331 if (error != 0) { 3332 device_printf(sc->sc_dev, 3333 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3334 error); 3335 return error; 3336 } 3337 3338 if (wvp->wv_gtk != 0) { 3339 error = wpi_set_global_keys(ni); 3340 if (error != 0) { 3341 device_printf(sc->sc_dev, 3342 "%s: error while setting global keys\n", __func__); 3343 return ENXIO; 3344 } 3345 } 3346 3347 return 0; 3348 } 3349 3350 /* 3351 * Broadcast node is used to send group-addressed and management frames. 3352 */ 3353 static int 3354 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3355 { 3356 struct ieee80211com *ic = &sc->sc_ic; 3357 struct wpi_node_info node; 3358 3359 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3360 3361 memset(&node, 0, sizeof node); 3362 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3363 node.id = WPI_ID_BROADCAST; 3364 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3365 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3366 node.action = htole32(WPI_ACTION_SET_RATE); 3367 node.antenna = WPI_ANTENNA_BOTH; 3368 3369 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3370 3371 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3372 } 3373 3374 static int 3375 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3376 { 3377 struct wpi_node *wn = WPI_NODE(ni); 3378 int error; 3379 3380 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3381 3382 wn->id = wpi_add_node_entry_sta(sc); 3383 3384 if ((error = wpi_add_node(sc, ni)) != 0) { 3385 wpi_del_node_entry(sc, wn->id); 3386 wn->id = WPI_ID_UNDEFINED; 3387 return error; 3388 } 3389 3390 return 0; 3391 } 3392 3393 static int 3394 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3395 { 3396 struct wpi_node *wn = WPI_NODE(ni); 3397 int error; 3398 3399 KASSERT(wn->id == WPI_ID_UNDEFINED, 3400 ("the node %d was added before", wn->id)); 3401 3402 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3403 3404 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3405 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3406 return ENOMEM; 3407 } 3408 3409 if ((error = wpi_add_node(sc, ni)) != 0) { 3410 wpi_del_node_entry(sc, wn->id); 3411 wn->id = WPI_ID_UNDEFINED; 3412 return error; 3413 } 3414 3415 return 0; 3416 } 3417 3418 static void 3419 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3420 { 3421 struct wpi_node *wn = WPI_NODE(ni); 3422 struct wpi_cmd_del_node node; 3423 int error; 3424 3425 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3426 3427 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3428 3429 memset(&node, 0, sizeof node); 3430 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3431 node.count = 1; 3432 3433 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3434 wn->id, ether_sprintf(ni->ni_macaddr)); 3435 3436 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3437 if (error != 0) { 3438 device_printf(sc->sc_dev, 3439 "%s: could not delete node %u, error %d\n", __func__, 3440 wn->id, error); 3441 } 3442 } 3443 3444 static int 3445 wpi_updateedca(struct ieee80211com *ic) 3446 { 3447 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3448 struct wpi_softc *sc = ic->ic_softc; 3449 struct wpi_edca_params cmd; 3450 int aci, error; 3451 3452 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3453 3454 memset(&cmd, 0, sizeof cmd); 3455 cmd.flags = htole32(WPI_EDCA_UPDATE); 3456 for (aci = 0; aci < WME_NUM_AC; aci++) { 3457 const struct wmeParams *ac = 3458 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3459 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3460 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3461 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3462 cmd.ac[aci].txoplimit = 3463 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3464 3465 DPRINTF(sc, WPI_DEBUG_EDCA, 3466 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3467 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3468 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3469 cmd.ac[aci].txoplimit); 3470 } 3471 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3472 3473 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3474 3475 return error; 3476 #undef WPI_EXP2 3477 } 3478 3479 static void 3480 wpi_set_promisc(struct wpi_softc *sc) 3481 { 3482 struct ieee80211com *ic = &sc->sc_ic; 3483 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3484 uint32_t promisc_filter; 3485 3486 promisc_filter = WPI_FILTER_CTL; 3487 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3488 promisc_filter |= WPI_FILTER_PROMISC; 3489 3490 if (ic->ic_promisc > 0) 3491 sc->rxon.filter |= htole32(promisc_filter); 3492 else 3493 sc->rxon.filter &= ~htole32(promisc_filter); 3494 } 3495 3496 static void 3497 wpi_update_promisc(struct ieee80211com *ic) 3498 { 3499 struct wpi_softc *sc = ic->ic_softc; 3500 3501 WPI_RXON_LOCK(sc); 3502 wpi_set_promisc(sc); 3503 3504 if (wpi_send_rxon(sc, 1, 1) != 0) { 3505 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3506 __func__); 3507 } 3508 WPI_RXON_UNLOCK(sc); 3509 } 3510 3511 static void 3512 wpi_update_mcast(struct ieee80211com *ic) 3513 { 3514 /* Ignore */ 3515 } 3516 3517 static void 3518 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3519 { 3520 struct wpi_cmd_led led; 3521 3522 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3523 3524 led.which = which; 3525 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3526 led.off = off; 3527 led.on = on; 3528 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3529 } 3530 3531 static int 3532 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3533 { 3534 struct wpi_cmd_timing cmd; 3535 uint64_t val, mod; 3536 3537 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3538 3539 memset(&cmd, 0, sizeof cmd); 3540 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3541 cmd.bintval = htole16(ni->ni_intval); 3542 cmd.lintval = htole16(10); 3543 3544 /* Compute remaining time until next beacon. */ 3545 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3546 mod = le64toh(cmd.tstamp) % val; 3547 cmd.binitval = htole32((uint32_t)(val - mod)); 3548 3549 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3550 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3551 3552 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3553 } 3554 3555 /* 3556 * This function is called periodically (every 60 seconds) to adjust output 3557 * power to temperature changes. 3558 */ 3559 static void 3560 wpi_power_calibration(struct wpi_softc *sc) 3561 { 3562 int temp; 3563 3564 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3565 3566 /* Update sensor data. */ 3567 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3568 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3569 3570 /* Sanity-check read value. */ 3571 if (temp < -260 || temp > 25) { 3572 /* This can't be correct, ignore. */ 3573 DPRINTF(sc, WPI_DEBUG_TEMP, 3574 "out-of-range temperature reported: %d\n", temp); 3575 return; 3576 } 3577 3578 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3579 3580 /* Adjust Tx power if need be. */ 3581 if (abs(temp - sc->temp) <= 6) 3582 return; 3583 3584 sc->temp = temp; 3585 3586 if (wpi_set_txpower(sc, 1) != 0) { 3587 /* just warn, too bad for the automatic calibration... */ 3588 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3589 } 3590 } 3591 3592 /* 3593 * Set TX power for current channel. 3594 */ 3595 static int 3596 wpi_set_txpower(struct wpi_softc *sc, int async) 3597 { 3598 struct wpi_power_group *group; 3599 struct wpi_cmd_txpower cmd; 3600 uint8_t chan; 3601 int idx, is_chan_5ghz, i; 3602 3603 /* Retrieve current channel from last RXON. 
*/ 3604 chan = sc->rxon.chan; 3605 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3606 3607 /* Find the TX power group to which this channel belongs. */ 3608 if (is_chan_5ghz) { 3609 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3610 if (chan <= group->chan) 3611 break; 3612 } else 3613 group = &sc->groups[0]; 3614 3615 memset(&cmd, 0, sizeof cmd); 3616 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3617 cmd.chan = htole16(chan); 3618 3619 /* Set TX power for all OFDM and CCK rates. */ 3620 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3621 /* Retrieve TX power for this channel/rate. */ 3622 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3623 3624 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3625 3626 if (is_chan_5ghz) { 3627 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3628 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3629 } else { 3630 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3631 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3632 } 3633 DPRINTF(sc, WPI_DEBUG_TEMP, 3634 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3635 } 3636 3637 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3638 } 3639 3640 /* 3641 * Determine Tx power index for a given channel/rate combination. 3642 * This takes into account the regulatory information from EEPROM and the 3643 * current temperature. 3644 */ 3645 static int 3646 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3647 uint8_t chan, int is_chan_5ghz, int ridx) 3648 { 3649 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3650 #define fdivround(a, b, n) \ 3651 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3652 3653 /* Linear interpolation. */ 3654 #define interpolate(x, x1, y1, x2, y2, n) \ 3655 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3656 3657 struct wpi_power_sample *sample; 3658 int pwr, idx; 3659 3660 /* Default TX power is group maximum TX power minus 3dB. */ 3661 pwr = group->maxpwr / 2; 3662 3663 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3664 switch (ridx) { 3665 case WPI_RIDX_OFDM36: 3666 pwr -= is_chan_5ghz ? 5 : 0; 3667 break; 3668 case WPI_RIDX_OFDM48: 3669 pwr -= is_chan_5ghz ? 10 : 7; 3670 break; 3671 case WPI_RIDX_OFDM54: 3672 pwr -= is_chan_5ghz ? 12 : 9; 3673 break; 3674 } 3675 3676 /* Never exceed the channel maximum allowed TX power. */ 3677 pwr = min(pwr, sc->maxpwr[chan]); 3678 3679 /* Retrieve TX power index into gain tables from samples. */ 3680 for (sample = group->samples; sample < &group->samples[3]; sample++) 3681 if (pwr > sample[1].power) 3682 break; 3683 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3684 idx = interpolate(pwr, sample[0].power, sample[0].index, 3685 sample[1].power, sample[1].index, 19); 3686 3687 /*- 3688 * Adjust power index based on current temperature: 3689 * - if cooler than factory-calibrated: decrease output power 3690 * - if warmer than factory-calibrated: increase output power 3691 */ 3692 idx -= (sc->temp - group->temp) * 11 / 100; 3693 3694 /* Decrease TX power for CCK rates (-5dB). */ 3695 if (ridx >= WPI_RIDX_CCK1) 3696 idx += 10; 3697 3698 /* Make sure idx stays in a valid range. */ 3699 if (idx < 0) 3700 return 0; 3701 if (idx > WPI_MAX_PWR_INDEX) 3702 return WPI_MAX_PWR_INDEX; 3703 return idx; 3704 3705 #undef interpolate 3706 #undef fdivround 3707 } 3708 3709 /* 3710 * Set STA mode power saving level (between 0 and 5). 
3711 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3712 */ 3713 static int 3714 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3715 { 3716 struct wpi_pmgt_cmd cmd; 3717 const struct wpi_pmgt *pmgt; 3718 uint32_t max, skip_dtim; 3719 uint32_t reg; 3720 int i; 3721 3722 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3723 "%s: dtim=%d, level=%d, async=%d\n", 3724 __func__, dtim, level, async); 3725 3726 /* Select which PS parameters to use. */ 3727 if (dtim <= 10) 3728 pmgt = &wpi_pmgt[0][level]; 3729 else 3730 pmgt = &wpi_pmgt[1][level]; 3731 3732 memset(&cmd, 0, sizeof cmd); 3733 if (level != 0) /* not CAM */ 3734 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3735 /* Retrieve PCIe Active State Power Management (ASPM). */ 3736 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3737 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3738 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3739 3740 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3741 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3742 3743 if (dtim == 0) { 3744 dtim = 1; 3745 skip_dtim = 0; 3746 } else 3747 skip_dtim = pmgt->skip_dtim; 3748 3749 if (skip_dtim != 0) { 3750 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3751 max = pmgt->intval[4]; 3752 if (max == (uint32_t)-1) 3753 max = dtim * (skip_dtim + 1); 3754 else if (max > dtim) 3755 max = (max / dtim) * dtim; 3756 } else 3757 max = dtim; 3758 3759 for (i = 0; i < 5; i++) 3760 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3761 3762 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3763 } 3764 3765 static int 3766 wpi_send_btcoex(struct wpi_softc *sc) 3767 { 3768 struct wpi_bluetooth cmd; 3769 3770 memset(&cmd, 0, sizeof cmd); 3771 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3772 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3773 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3774 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3775 __func__); 3776 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3777 } 3778 3779 static int 3780 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3781 { 3782 int error; 3783 3784 if (async) 3785 WPI_RXON_LOCK_ASSERT(sc); 3786 3787 if (assoc && wpi_check_bss_filter(sc) != 0) { 3788 struct wpi_assoc rxon_assoc; 3789 3790 rxon_assoc.flags = sc->rxon.flags; 3791 rxon_assoc.filter = sc->rxon.filter; 3792 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3793 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3794 rxon_assoc.reserved = 0; 3795 3796 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3797 sizeof (struct wpi_assoc), async); 3798 if (error != 0) { 3799 device_printf(sc->sc_dev, 3800 "RXON_ASSOC command failed, error %d\n", error); 3801 return error; 3802 } 3803 } else { 3804 if (async) { 3805 WPI_NT_LOCK(sc); 3806 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3807 sizeof (struct wpi_rxon), async); 3808 if (error == 0) 3809 wpi_clear_node_table(sc); 3810 WPI_NT_UNLOCK(sc); 3811 } else { 3812 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3813 sizeof (struct wpi_rxon), async); 3814 if (error == 0) 3815 wpi_clear_node_table(sc); 3816 } 3817 3818 if (error != 0) { 3819 device_printf(sc->sc_dev, 3820 "RXON command failed, error %d\n", error); 3821 return error; 3822 } 3823 3824 /* Add broadcast node. */ 3825 error = wpi_add_broadcast_node(sc, async); 3826 if (error != 0) { 3827 device_printf(sc->sc_dev, 3828 "could not add broadcast node, error %d\n", error); 3829 return error; 3830 } 3831 } 3832 3833 /* Configuration has changed, set Tx power accordingly. 
*/ 3834 if ((error = wpi_set_txpower(sc, async)) != 0) { 3835 device_printf(sc->sc_dev, 3836 "%s: could not set TX power, error %d\n", __func__, error); 3837 return error; 3838 } 3839 3840 return 0; 3841 } 3842 3843 /** 3844 * Configure the card to listen on a particular channel; this transitions the 3845 * card into being able to receive frames from remote devices. 3846 */ 3847 static int 3848 wpi_config(struct wpi_softc *sc) 3849 { 3850 struct ieee80211com *ic = &sc->sc_ic; 3851 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3852 struct ieee80211_channel *c = ic->ic_curchan; 3853 int error; 3854 3855 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3856 3857 /* Set power saving level to CAM during initialization. */ 3858 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3859 device_printf(sc->sc_dev, 3860 "%s: could not set power saving level\n", __func__); 3861 return error; 3862 } 3863 3864 /* Configure bluetooth coexistence. */ 3865 if ((error = wpi_send_btcoex(sc)) != 0) { 3866 device_printf(sc->sc_dev, 3867 "could not configure bluetooth coexistence\n"); 3868 return error; 3869 } 3870 3871 /* Configure adapter. */ 3872 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3873 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3874 3875 /* Set default channel. */ 3876 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3877 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3878 if (IEEE80211_IS_CHAN_2GHZ(c)) 3879 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3880 3881 sc->rxon.filter = WPI_FILTER_MULTICAST; 3882 switch (ic->ic_opmode) { 3883 case IEEE80211_M_STA: 3884 sc->rxon.mode = WPI_MODE_STA; 3885 break; 3886 case IEEE80211_M_IBSS: 3887 sc->rxon.mode = WPI_MODE_IBSS; 3888 sc->rxon.filter |= WPI_FILTER_BEACON; 3889 break; 3890 case IEEE80211_M_HOSTAP: 3891 /* XXX workaround for beaconing */ 3892 sc->rxon.mode = WPI_MODE_IBSS; 3893 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3894 break; 3895 case IEEE80211_M_AHDEMO: 3896 sc->rxon.mode = WPI_MODE_HOSTAP; 3897 break; 3898 case IEEE80211_M_MONITOR: 3899 sc->rxon.mode = WPI_MODE_MONITOR; 3900 break; 3901 default: 3902 device_printf(sc->sc_dev, "unknown opmode %d\n", 3903 ic->ic_opmode); 3904 return EINVAL; 3905 } 3906 sc->rxon.filter = htole32(sc->rxon.filter); 3907 wpi_set_promisc(sc); 3908 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3909 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3910 3911 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3912 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3913 __func__); 3914 return error; 3915 } 3916 3917 /* Set up rate scaling. */ 3918 if ((error = wpi_mrr_setup(sc)) != 0) { 3919 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3920 error); 3921 return error; 3922 } 3923 3924 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3925 3926 return 0; 3927 } 3928 3929 static uint16_t 3930 wpi_get_active_dwell_time(struct wpi_softc *sc, 3931 struct ieee80211_channel *c, uint8_t n_probes) 3932 { 3933 /* No channel? Default to 2GHz settings. */ 3934 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3935 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3936 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3937 } 3938 3939 /* 5GHz dwell time. */ 3940 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 3941 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 3942 } 3943 3944 /* 3945 * Limit the total dwell time. 3946 * 3947 * Returns the dwell time in milliseconds.
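 * When associated, the dwell time is clamped to the beacon interval
 * (minus twice the channel tune time) so that beacons are not missed
 * while the device is off-channel.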
3948 */ 3949 static uint16_t 3950 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 3951 { 3952 struct ieee80211com *ic = &sc->sc_ic; 3953 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3954 int bintval = 0; 3955 3956 /* bintval is in TU (1.024mS) */ 3957 if (vap != NULL) 3958 bintval = vap->iv_bss->ni_intval; 3959 3960 /* 3961 * If it's non-zero, we should calculate the minimum of 3962 * it and the DWELL_BASE. 3963 * 3964 * XXX Yes, the math should take into account that bintval 3965 * is 1.024mS, not 1mS.. 3966 */ 3967 if (bintval > 0) { 3968 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 3969 bintval); 3970 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 3971 } 3972 3973 /* No association context? Default. */ 3974 return dwell_time; 3975 } 3976 3977 static uint16_t 3978 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 3979 { 3980 uint16_t passive; 3981 3982 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 3983 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 3984 else 3985 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 3986 3987 /* Clamp to the beacon interval if we're associated. */ 3988 return (wpi_limit_dwell(sc, passive)); 3989 } 3990 3991 static uint32_t 3992 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 3993 { 3994 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 3995 uint32_t nbeacons = time / bintval; 3996 3997 if (mod > WPI_PAUSE_MAX_TIME) 3998 mod = WPI_PAUSE_MAX_TIME; 3999 4000 return WPI_PAUSE_SCAN(nbeacons, mod); 4001 } 4002 4003 /* 4004 * Send a scan request to the firmware. 4005 */ 4006 static int 4007 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4008 { 4009 struct ieee80211com *ic = &sc->sc_ic; 4010 struct ieee80211_scan_state *ss = ic->ic_scan; 4011 struct ieee80211vap *vap = ss->ss_vap; 4012 struct wpi_scan_hdr *hdr; 4013 struct wpi_cmd_data *tx; 4014 struct wpi_scan_essid *essids; 4015 struct wpi_scan_chan *chan; 4016 struct ieee80211_frame *wh; 4017 struct ieee80211_rateset *rs; 4018 uint16_t dwell_active, dwell_passive; 4019 uint8_t *buf, *frm; 4020 int bgscan, bintval, buflen, error, i, nssid; 4021 4022 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4023 4024 /* 4025 * We are absolutely not allowed to send a scan command when another 4026 * scan command is pending. 4027 */ 4028 if (callout_pending(&sc->scan_timeout)) { 4029 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4030 __func__); 4031 error = EAGAIN; 4032 goto fail; 4033 } 4034 4035 bgscan = wpi_check_bss_filter(sc); 4036 bintval = vap->iv_bss->ni_intval; 4037 if (bgscan != 0 && 4038 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4039 error = EOPNOTSUPP; 4040 goto fail; 4041 } 4042 4043 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4044 if (buf == NULL) { 4045 device_printf(sc->sc_dev, 4046 "%s: could not allocate buffer for scan command\n", 4047 __func__); 4048 error = ENOMEM; 4049 goto fail; 4050 } 4051 hdr = (struct wpi_scan_hdr *)buf; 4052 4053 /* 4054 * Move to the next channel if no packets are received within 10 msecs 4055 * after sending the probe request. 4056 */ 4057 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4058 hdr->quiet_threshold = htole16(1); 4059 4060 if (bgscan != 0) { 4061 /* 4062 * Max needs to be greater than active and passive and quiet! 4063 * It's also in microseconds! 
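 *
 * The pause interval set below is built by wpi_get_scan_pause_time() above:
 * a count of full beacon intervals plus the remainder converted to
 * microseconds, capped at WPI_PAUSE_MAX_TIME and packed with WPI_PAUSE_SCAN().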
4064 */ 4065 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4066 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4067 bintval)); 4068 } 4069 4070 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4071 4072 tx = (struct wpi_cmd_data *)(hdr + 1); 4073 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4074 tx->id = WPI_ID_BROADCAST; 4075 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4076 4077 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4078 /* Send probe requests at 6Mbps. */ 4079 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4080 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4081 } else { 4082 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4083 /* Send probe requests at 1Mbps. */ 4084 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4085 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4086 } 4087 4088 essids = (struct wpi_scan_essid *)(tx + 1); 4089 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4090 for (i = 0; i < nssid; i++) { 4091 essids[i].id = IEEE80211_ELEMID_SSID; 4092 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4093 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4094 #ifdef WPI_DEBUG 4095 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4096 printf("Scanning Essid: "); 4097 ieee80211_print_essid(essids[i].data, essids[i].len); 4098 printf("\n"); 4099 } 4100 #endif 4101 } 4102 4103 /* 4104 * Build a probe request frame. Most of the following code is a 4105 * copy & paste of what is done in net80211. 4106 */ 4107 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4108 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4109 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4110 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4111 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4112 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4113 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4114 4115 frm = (uint8_t *)(wh + 1); 4116 frm = ieee80211_add_ssid(frm, NULL, 0); 4117 frm = ieee80211_add_rates(frm, rs); 4118 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4119 frm = ieee80211_add_xrates(frm, rs); 4120 4121 /* Set length of probe request. */ 4122 tx->len = htole16(frm - (uint8_t *)wh); 4123 4124 /* 4125 * Construct information about the channel that we 4126 * want to scan. The firmware expects this to be directly 4127 * after the scan probe request 4128 */ 4129 chan = (struct wpi_scan_chan *)frm; 4130 chan->chan = ieee80211_chan2ieee(ic, c); 4131 chan->flags = 0; 4132 if (nssid) { 4133 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4134 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4135 } else 4136 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4137 4138 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4139 chan->flags |= WPI_CHAN_ACTIVE; 4140 4141 /* 4142 * Calculate the active/passive dwell times. 4143 */ 4144 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4145 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4146 4147 /* Make sure they're valid. */ 4148 if (dwell_active > dwell_passive) 4149 dwell_active = dwell_passive; 4150 4151 chan->active = htole16(dwell_active); 4152 chan->passive = htole16(dwell_passive); 4153 4154 chan->dsp_gain = 0x6e; /* Default level */ 4155 4156 if (IEEE80211_IS_CHAN_5GHZ(c)) 4157 chan->rf_gain = 0x3b; 4158 else 4159 chan->rf_gain = 0x28; 4160 4161 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4162 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4163 4164 hdr->nchan++; 4165 4166 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4167 /* XXX Force probe request transmission. 
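 * The current channel is listed twice for this purpose; the second entry
 * below has its flags cleared and its dwell set to the quiet time so that
 * the extra visit adds as little delay as possible.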
*/ 4168 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4169 4170 chan++; 4171 4172 /* Reduce unnecessary delay. */ 4173 chan->flags = 0; 4174 chan->passive = chan->active = hdr->quiet_time; 4175 4176 hdr->nchan++; 4177 } 4178 4179 chan++; 4180 4181 buflen = (uint8_t *)chan - buf; 4182 hdr->len = htole16(buflen); 4183 4184 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4185 hdr->nchan); 4186 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4187 free(buf, M_DEVBUF); 4188 4189 if (error != 0) 4190 goto fail; 4191 4192 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4193 4194 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4195 4196 return 0; 4197 4198 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4199 4200 return error; 4201 } 4202 4203 static int 4204 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4205 { 4206 struct ieee80211com *ic = vap->iv_ic; 4207 struct ieee80211_node *ni = vap->iv_bss; 4208 struct ieee80211_channel *c = ni->ni_chan; 4209 int error; 4210 4211 WPI_RXON_LOCK(sc); 4212 4213 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4214 4215 /* Update adapter configuration. */ 4216 sc->rxon.associd = 0; 4217 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4218 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4219 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4220 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4221 if (IEEE80211_IS_CHAN_2GHZ(c)) 4222 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4223 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4224 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4225 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4226 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4227 if (IEEE80211_IS_CHAN_A(c)) { 4228 sc->rxon.cck_mask = 0; 4229 sc->rxon.ofdm_mask = 0x15; 4230 } else if (IEEE80211_IS_CHAN_B(c)) { 4231 sc->rxon.cck_mask = 0x03; 4232 sc->rxon.ofdm_mask = 0; 4233 } else { 4234 /* Assume 802.11b/g. */ 4235 sc->rxon.cck_mask = 0x0f; 4236 sc->rxon.ofdm_mask = 0x15; 4237 } 4238 4239 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4240 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4241 sc->rxon.ofdm_mask); 4242 4243 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4244 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4245 __func__); 4246 } 4247 4248 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4249 4250 WPI_RXON_UNLOCK(sc); 4251 4252 return error; 4253 } 4254 4255 static int 4256 wpi_config_beacon(struct wpi_vap *wvp) 4257 { 4258 struct ieee80211vap *vap = &wvp->wv_vap; 4259 struct ieee80211com *ic = vap->iv_ic; 4260 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4261 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4262 struct wpi_softc *sc = ic->ic_softc; 4263 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4264 struct ieee80211_tim_ie *tie; 4265 struct mbuf *m; 4266 uint8_t *ptr; 4267 int error; 4268 4269 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4270 4271 WPI_VAP_LOCK_ASSERT(wvp); 4272 4273 cmd->len = htole16(bcn->m->m_pkthdr.len); 4274 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4275 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4276 4277 /* XXX seems to be unused */ 4278 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4279 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4280 ptr = mtod(bcn->m, uint8_t *); 4281 4282 cmd->tim = htole16(bo->bo_tim - ptr); 4283 cmd->timsz = tie->tim_len; 4284 } 4285 4286 /* Necessary for recursion in ieee80211_beacon_update(). 
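 * The command path takes over the mbuf attached to the buffer, so a
 * duplicate is handed to wpi_cmd2() below and the original is restored
 * afterwards for later ieee80211_beacon_update() calls.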
*/ 4287 m = bcn->m; 4288 bcn->m = m_dup(m, M_NOWAIT); 4289 if (bcn->m == NULL) { 4290 device_printf(sc->sc_dev, 4291 "%s: could not copy beacon frame\n", __func__); 4292 error = ENOMEM; 4293 goto end; 4294 } 4295 4296 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4297 device_printf(sc->sc_dev, 4298 "%s: could not update beacon frame, error %d", __func__, 4299 error); 4300 } 4301 4302 /* Restore mbuf. */ 4303 end: bcn->m = m; 4304 4305 return error; 4306 } 4307 4308 static int 4309 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4310 { 4311 struct ieee80211vap *vap = ni->ni_vap; 4312 struct wpi_vap *wvp = WPI_VAP(vap); 4313 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4314 struct mbuf *m; 4315 int error; 4316 4317 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4318 4319 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4320 return EINVAL; 4321 4322 m = ieee80211_beacon_alloc(ni); 4323 if (m == NULL) { 4324 device_printf(sc->sc_dev, 4325 "%s: could not allocate beacon frame\n", __func__); 4326 return ENOMEM; 4327 } 4328 4329 WPI_VAP_LOCK(wvp); 4330 if (bcn->m != NULL) 4331 m_freem(bcn->m); 4332 4333 bcn->m = m; 4334 4335 error = wpi_config_beacon(wvp); 4336 WPI_VAP_UNLOCK(wvp); 4337 4338 return error; 4339 } 4340 4341 static void 4342 wpi_update_beacon(struct ieee80211vap *vap, int item) 4343 { 4344 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4345 struct wpi_vap *wvp = WPI_VAP(vap); 4346 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4347 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4348 struct ieee80211_node *ni = vap->iv_bss; 4349 int mcast = 0; 4350 4351 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4352 4353 WPI_VAP_LOCK(wvp); 4354 if (bcn->m == NULL) { 4355 bcn->m = ieee80211_beacon_alloc(ni); 4356 if (bcn->m == NULL) { 4357 device_printf(sc->sc_dev, 4358 "%s: could not allocate beacon frame\n", __func__); 4359 4360 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4361 __func__); 4362 4363 WPI_VAP_UNLOCK(wvp); 4364 return; 4365 } 4366 } 4367 WPI_VAP_UNLOCK(wvp); 4368 4369 if (item == IEEE80211_BEACON_TIM) 4370 mcast = 1; /* TODO */ 4371 4372 setbit(bo->bo_flags, item); 4373 ieee80211_beacon_update(ni, bcn->m, mcast); 4374 4375 WPI_VAP_LOCK(wvp); 4376 wpi_config_beacon(wvp); 4377 WPI_VAP_UNLOCK(wvp); 4378 4379 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4380 } 4381 4382 static void 4383 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4384 { 4385 struct ieee80211vap *vap = ni->ni_vap; 4386 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4387 struct wpi_node *wn = WPI_NODE(ni); 4388 int error; 4389 4390 WPI_NT_LOCK(sc); 4391 4392 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4393 4394 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4395 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4396 device_printf(sc->sc_dev, 4397 "%s: could not add IBSS node, error %d\n", 4398 __func__, error); 4399 } 4400 } 4401 WPI_NT_UNLOCK(sc); 4402 } 4403 4404 static int 4405 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4406 { 4407 struct ieee80211com *ic = vap->iv_ic; 4408 struct ieee80211_node *ni = vap->iv_bss; 4409 struct ieee80211_channel *c = ni->ni_chan; 4410 int error; 4411 4412 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4413 4414 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4415 /* Link LED blinks while monitoring. 
*/ 4416 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4417 return 0; 4418 } 4419 4420 /* XXX kernel panic workaround */ 4421 if (c == IEEE80211_CHAN_ANYC) { 4422 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4423 __func__); 4424 return EINVAL; 4425 } 4426 4427 if ((error = wpi_set_timing(sc, ni)) != 0) { 4428 device_printf(sc->sc_dev, 4429 "%s: could not set timing, error %d\n", __func__, error); 4430 return error; 4431 } 4432 4433 /* Update adapter configuration. */ 4434 WPI_RXON_LOCK(sc); 4435 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4436 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4437 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4438 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4439 if (IEEE80211_IS_CHAN_2GHZ(c)) 4440 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4441 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4442 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4443 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4444 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4445 if (IEEE80211_IS_CHAN_A(c)) { 4446 sc->rxon.cck_mask = 0; 4447 sc->rxon.ofdm_mask = 0x15; 4448 } else if (IEEE80211_IS_CHAN_B(c)) { 4449 sc->rxon.cck_mask = 0x03; 4450 sc->rxon.ofdm_mask = 0; 4451 } else { 4452 /* Assume 802.11b/g. */ 4453 sc->rxon.cck_mask = 0x0f; 4454 sc->rxon.ofdm_mask = 0x15; 4455 } 4456 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4457 4458 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4459 sc->rxon.chan, sc->rxon.flags); 4460 4461 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4462 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4463 __func__); 4464 return error; 4465 } 4466 4467 /* Start periodic calibration timer. */ 4468 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4469 4470 WPI_RXON_UNLOCK(sc); 4471 4472 if (vap->iv_opmode == IEEE80211_M_IBSS || 4473 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4474 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4475 device_printf(sc->sc_dev, 4476 "%s: could not setup beacon, error %d\n", __func__, 4477 error); 4478 return error; 4479 } 4480 } 4481 4482 if (vap->iv_opmode == IEEE80211_M_STA) { 4483 /* Add BSS node. */ 4484 WPI_NT_LOCK(sc); 4485 error = wpi_add_sta_node(sc, ni); 4486 WPI_NT_UNLOCK(sc); 4487 if (error != 0) { 4488 device_printf(sc->sc_dev, 4489 "%s: could not add BSS node, error %d\n", __func__, 4490 error); 4491 return error; 4492 } 4493 } 4494 4495 /* Link LED always on while associated. */ 4496 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4497 4498 /* Enable power-saving mode if requested by user. 
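 * Level 3 is an intermediate setting between CAM (0) and maximum power
 * saving (5); the command is issued asynchronously.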
*/ 4499 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4500 vap->iv_opmode != IEEE80211_M_IBSS) 4501 (void)wpi_set_pslevel(sc, 0, 3, 1); 4502 4503 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4504 4505 return 0; 4506 } 4507 4508 static int 4509 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4510 { 4511 const struct ieee80211_cipher *cip = k->wk_cipher; 4512 struct ieee80211vap *vap = ni->ni_vap; 4513 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4514 struct wpi_node *wn = WPI_NODE(ni); 4515 struct wpi_node_info node; 4516 uint16_t kflags; 4517 int error; 4518 4519 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4520 4521 if (wpi_check_node_entry(sc, wn->id) == 0) { 4522 device_printf(sc->sc_dev, "%s: node does not exist\n", 4523 __func__); 4524 return 0; 4525 } 4526 4527 switch (cip->ic_cipher) { 4528 case IEEE80211_CIPHER_AES_CCM: 4529 kflags = WPI_KFLAG_CCMP; 4530 break; 4531 4532 default: 4533 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4534 cip->ic_cipher); 4535 return 0; 4536 } 4537 4538 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4539 if (k->wk_flags & IEEE80211_KEY_GROUP) 4540 kflags |= WPI_KFLAG_MULTICAST; 4541 4542 memset(&node, 0, sizeof node); 4543 node.id = wn->id; 4544 node.control = WPI_NODE_UPDATE; 4545 node.flags = WPI_FLAG_KEY_SET; 4546 node.kflags = htole16(kflags); 4547 memcpy(node.key, k->wk_key, k->wk_keylen); 4548 again: 4549 DPRINTF(sc, WPI_DEBUG_KEY, 4550 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4551 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4552 node.id, ether_sprintf(ni->ni_macaddr)); 4553 4554 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4555 if (error != 0) { 4556 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4557 error); 4558 return !error; 4559 } 4560 4561 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4562 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4563 kflags |= WPI_KFLAG_MULTICAST; 4564 node.kflags = htole16(kflags); 4565 4566 goto again; 4567 } 4568 4569 return 1; 4570 } 4571 4572 static void 4573 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4574 { 4575 const struct ieee80211_key *k = arg; 4576 struct ieee80211vap *vap = ni->ni_vap; 4577 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4578 struct wpi_node *wn = WPI_NODE(ni); 4579 int error; 4580 4581 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4582 return; 4583 4584 WPI_NT_LOCK(sc); 4585 error = wpi_load_key(ni, k); 4586 WPI_NT_UNLOCK(sc); 4587 4588 if (error == 0) { 4589 device_printf(sc->sc_dev, "%s: error while setting key\n", 4590 __func__); 4591 } 4592 } 4593 4594 static int 4595 wpi_set_global_keys(struct ieee80211_node *ni) 4596 { 4597 struct ieee80211vap *vap = ni->ni_vap; 4598 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4599 int error = 1; 4600 4601 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4602 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4603 error = wpi_load_key(ni, wk); 4604 4605 return !error; 4606 } 4607 4608 static int 4609 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4610 { 4611 struct ieee80211vap *vap = ni->ni_vap; 4612 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4613 struct wpi_node *wn = WPI_NODE(ni); 4614 struct wpi_node_info node; 4615 uint16_t kflags; 4616 int error; 4617 4618 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4619 4620 if (wpi_check_node_entry(sc, wn->id) == 0) { 4621 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4622 return 1; /* 
Nothing to do. */ 4623 } 4624 4625 kflags = WPI_KFLAG_KID(k->wk_keyix); 4626 if (k->wk_flags & IEEE80211_KEY_GROUP) 4627 kflags |= WPI_KFLAG_MULTICAST; 4628 4629 memset(&node, 0, sizeof node); 4630 node.id = wn->id; 4631 node.control = WPI_NODE_UPDATE; 4632 node.flags = WPI_FLAG_KEY_SET; 4633 node.kflags = htole16(kflags); 4634 again: 4635 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4636 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4637 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4638 4639 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4640 if (error != 0) { 4641 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4642 error); 4643 return !error; 4644 } 4645 4646 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4647 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4648 kflags |= WPI_KFLAG_MULTICAST; 4649 node.kflags = htole16(kflags); 4650 4651 goto again; 4652 } 4653 4654 return 1; 4655 } 4656 4657 static void 4658 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4659 { 4660 const struct ieee80211_key *k = arg; 4661 struct ieee80211vap *vap = ni->ni_vap; 4662 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4663 struct wpi_node *wn = WPI_NODE(ni); 4664 int error; 4665 4666 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4667 return; 4668 4669 WPI_NT_LOCK(sc); 4670 error = wpi_del_key(ni, k); 4671 WPI_NT_UNLOCK(sc); 4672 4673 if (error == 0) { 4674 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4675 __func__); 4676 } 4677 } 4678 4679 static int 4680 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4681 int set) 4682 { 4683 struct ieee80211com *ic = vap->iv_ic; 4684 struct wpi_softc *sc = ic->ic_softc; 4685 struct wpi_vap *wvp = WPI_VAP(vap); 4686 struct ieee80211_node *ni; 4687 int error, ni_ref = 0; 4688 4689 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4690 4691 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4692 /* Not for us. */ 4693 return 1; 4694 } 4695 4696 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4697 /* XMIT keys are handled in wpi_tx_data(). */ 4698 return 1; 4699 } 4700 4701 /* Handle group keys. */ 4702 if (&vap->iv_nw_keys[0] <= k && 4703 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4704 WPI_NT_LOCK(sc); 4705 if (set) 4706 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4707 else 4708 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4709 WPI_NT_UNLOCK(sc); 4710 4711 if (vap->iv_state == IEEE80211_S_RUN) { 4712 ieee80211_iterate_nodes(&ic->ic_sta, 4713 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4714 __DECONST(void *, k)); 4715 } 4716 4717 return 1; 4718 } 4719 4720 switch (vap->iv_opmode) { 4721 case IEEE80211_M_STA: 4722 ni = vap->iv_bss; 4723 break; 4724 4725 case IEEE80211_M_IBSS: 4726 case IEEE80211_M_AHDEMO: 4727 case IEEE80211_M_HOSTAP: 4728 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4729 if (ni == NULL) 4730 return 0; /* should not happen */ 4731 4732 ni_ref = 1; 4733 break; 4734 4735 default: 4736 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4737 vap->iv_opmode); 4738 return 0; 4739 } 4740 4741 WPI_NT_LOCK(sc); 4742 if (set) 4743 error = wpi_load_key(ni, k); 4744 else 4745 error = wpi_del_key(ni, k); 4746 WPI_NT_UNLOCK(sc); 4747 4748 if (ni_ref) 4749 ieee80211_node_decref(ni); 4750 4751 return error; 4752 } 4753 4754 static int 4755 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4756 { 4757 return wpi_process_key(vap, k, 1); 4758 } 4759 4760 static int 4761 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4762 { 4763 return wpi_process_key(vap, k, 0); 4764 } 4765 4766 /* 4767 * This function is called after the runtime firmware notifies us of its 4768 * readiness (called in a process context). 4769 */ 4770 static int 4771 wpi_post_alive(struct wpi_softc *sc) 4772 { 4773 int ntries, error; 4774 4775 /* Check (again) that the radio is not disabled. */ 4776 if ((error = wpi_nic_lock(sc)) != 0) 4777 return error; 4778 4779 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4780 4781 /* NB: Runtime firmware must be up and running. */ 4782 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4783 device_printf(sc->sc_dev, 4784 "RF switch: radio disabled (%s)\n", __func__); 4785 wpi_nic_unlock(sc); 4786 return EPERM; /* :-) */ 4787 } 4788 wpi_nic_unlock(sc); 4789 4790 /* Wait for thermal sensor to calibrate. */ 4791 for (ntries = 0; ntries < 1000; ntries++) { 4792 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4793 break; 4794 DELAY(10); 4795 } 4796 4797 if (ntries == 1000) { 4798 device_printf(sc->sc_dev, 4799 "timeout waiting for thermal sensor calibration\n"); 4800 return ETIMEDOUT; 4801 } 4802 4803 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4804 return 0; 4805 } 4806 4807 /* 4808 * The firmware boot code is small and is intended to be copied directly into 4809 * the NIC internal memory (no DMA transfer). 4810 */ 4811 static int 4812 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size) 4813 { 4814 int error, ntries; 4815 4816 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4817 4818 size /= sizeof (uint32_t); 4819 4820 if ((error = wpi_nic_lock(sc)) != 0) 4821 return error; 4822 4823 /* Copy microcode image into NIC memory. */ 4824 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4825 (const uint32_t *)ucode, size); 4826 4827 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4828 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4829 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4830 4831 /* Start boot load now. */ 4832 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4833 4834 /* Wait for transfer to complete. */ 4835 for (ntries = 0; ntries < 1000; ntries++) { 4836 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4837 DPRINTF(sc, WPI_DEBUG_HW, 4838 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4839 WPI_FH_TX_STATUS_IDLE(6), 4840 status & WPI_FH_TX_STATUS_IDLE(6)); 4841 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4842 DPRINTF(sc, WPI_DEBUG_HW, 4843 "Status Match! 
- ntries = %d\n", ntries); 4844 break; 4845 } 4846 DELAY(10); 4847 } 4848 if (ntries == 1000) { 4849 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4850 __func__); 4851 wpi_nic_unlock(sc); 4852 return ETIMEDOUT; 4853 } 4854 4855 /* Enable boot after power up. */ 4856 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4857 4858 wpi_nic_unlock(sc); 4859 return 0; 4860 } 4861 4862 static int 4863 wpi_load_firmware(struct wpi_softc *sc) 4864 { 4865 struct wpi_fw_info *fw = &sc->fw; 4866 struct wpi_dma_info *dma = &sc->fw_dma; 4867 int error; 4868 4869 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4870 4871 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4872 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4873 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4874 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4875 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4876 4877 /* Tell adapter where to find initialization sections. */ 4878 if ((error = wpi_nic_lock(sc)) != 0) 4879 return error; 4880 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4881 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4882 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4883 dma->paddr + WPI_FW_DATA_MAXSZ); 4884 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4885 wpi_nic_unlock(sc); 4886 4887 /* Load firmware boot code. */ 4888 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4889 if (error != 0) { 4890 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4891 __func__); 4892 return error; 4893 } 4894 4895 /* Now press "execute". */ 4896 WPI_WRITE(sc, WPI_RESET, 0); 4897 4898 /* Wait at most one second for first alive notification. */ 4899 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4900 device_printf(sc->sc_dev, 4901 "%s: timeout waiting for adapter to initialize, error %d\n", 4902 __func__, error); 4903 return error; 4904 } 4905 4906 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4907 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4908 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4909 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4910 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4911 4912 /* Tell adapter where to find runtime sections. 
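 * Unlike the init image, the runtime text size is tagged with WPI_FW_UPDATED
 * when it is handed to the BSM.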
*/ 4913 if ((error = wpi_nic_lock(sc)) != 0) 4914 return error; 4915 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4916 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4917 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4918 dma->paddr + WPI_FW_DATA_MAXSZ); 4919 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4920 WPI_FW_UPDATED | fw->main.textsz); 4921 wpi_nic_unlock(sc); 4922 4923 return 0; 4924 } 4925 4926 static int 4927 wpi_read_firmware(struct wpi_softc *sc) 4928 { 4929 const struct firmware *fp; 4930 struct wpi_fw_info *fw = &sc->fw; 4931 const struct wpi_firmware_hdr *hdr; 4932 int error; 4933 4934 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4935 4936 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4937 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 4938 4939 WPI_UNLOCK(sc); 4940 fp = firmware_get(WPI_FW_NAME); 4941 WPI_LOCK(sc); 4942 4943 if (fp == NULL) { 4944 device_printf(sc->sc_dev, 4945 "could not load firmware image '%s'\n", WPI_FW_NAME); 4946 return EINVAL; 4947 } 4948 4949 sc->fw_fp = fp; 4950 4951 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 4952 device_printf(sc->sc_dev, 4953 "firmware file too short: %zu bytes\n", fp->datasize); 4954 error = EINVAL; 4955 goto fail; 4956 } 4957 4958 fw->size = fp->datasize; 4959 fw->data = (const uint8_t *)fp->data; 4960 4961 /* Extract firmware header information. */ 4962 hdr = (const struct wpi_firmware_hdr *)fw->data; 4963 4964 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 4965 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 4966 4967 fw->main.textsz = le32toh(hdr->rtextsz); 4968 fw->main.datasz = le32toh(hdr->rdatasz); 4969 fw->init.textsz = le32toh(hdr->itextsz); 4970 fw->init.datasz = le32toh(hdr->idatasz); 4971 fw->boot.textsz = le32toh(hdr->btextsz); 4972 fw->boot.datasz = 0; 4973 4974 /* Sanity-check firmware header. */ 4975 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 4976 fw->main.datasz > WPI_FW_DATA_MAXSZ || 4977 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 4978 fw->init.datasz > WPI_FW_DATA_MAXSZ || 4979 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 4980 (fw->boot.textsz & 3) != 0) { 4981 device_printf(sc->sc_dev, "invalid firmware header\n"); 4982 error = EINVAL; 4983 goto fail; 4984 } 4985 4986 /* Check that all firmware sections fit. */ 4987 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 4988 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 4989 device_printf(sc->sc_dev, 4990 "firmware file too short: %zu bytes\n", fw->size); 4991 error = EINVAL; 4992 goto fail; 4993 } 4994 4995 /* Get pointers to firmware sections. 
*/ 4996 fw->main.text = (const uint8_t *)(hdr + 1); 4997 fw->main.data = fw->main.text + fw->main.textsz; 4998 fw->init.text = fw->main.data + fw->main.datasz; 4999 fw->init.data = fw->init.text + fw->init.textsz; 5000 fw->boot.text = fw->init.data + fw->init.datasz; 5001 5002 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5003 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5004 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5005 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5006 fw->main.textsz, fw->main.datasz, 5007 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5008 5009 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5010 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5011 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5012 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5013 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5014 5015 return 0; 5016 5017 fail: wpi_unload_firmware(sc); 5018 return error; 5019 } 5020 5021 /** 5022 * Free the referenced firmware image 5023 */ 5024 static void 5025 wpi_unload_firmware(struct wpi_softc *sc) 5026 { 5027 if (sc->fw_fp != NULL) { 5028 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5029 sc->fw_fp = NULL; 5030 } 5031 } 5032 5033 static int 5034 wpi_clock_wait(struct wpi_softc *sc) 5035 { 5036 int ntries; 5037 5038 /* Set "initialization complete" bit. */ 5039 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5040 5041 /* Wait for clock stabilization. */ 5042 for (ntries = 0; ntries < 2500; ntries++) { 5043 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5044 return 0; 5045 DELAY(100); 5046 } 5047 device_printf(sc->sc_dev, 5048 "%s: timeout waiting for clock stabilization\n", __func__); 5049 5050 return ETIMEDOUT; 5051 } 5052 5053 static int 5054 wpi_apm_init(struct wpi_softc *sc) 5055 { 5056 uint32_t reg; 5057 int error; 5058 5059 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5060 5061 /* Disable L0s exit timer (NMI bug workaround). */ 5062 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5063 /* Don't wait for ICH L0s (ICH bug workaround). */ 5064 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5065 5066 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5067 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5068 5069 /* Retrieve PCIe Active State Power Management (ASPM). */ 5070 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5071 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5072 if (reg & 0x02) /* L1 Entry enabled. */ 5073 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5074 else 5075 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5076 5077 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5078 5079 /* Wait for clock stabilization before accessing prph. */ 5080 if ((error = wpi_clock_wait(sc)) != 0) 5081 return error; 5082 5083 if ((error = wpi_nic_lock(sc)) != 0) 5084 return error; 5085 /* Cleanup. */ 5086 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5087 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5088 5089 /* Enable DMA and BSM (Bootstrap State Machine). */ 5090 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5091 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5092 DELAY(20); 5093 /* Disable L1-Active. 
*/ 5094 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5095 wpi_nic_unlock(sc); 5096 5097 return 0; 5098 } 5099 5100 static void 5101 wpi_apm_stop_master(struct wpi_softc *sc) 5102 { 5103 int ntries; 5104 5105 /* Stop busmaster DMA activity. */ 5106 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5107 5108 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5109 WPI_GP_CNTRL_MAC_PS) 5110 return; /* Already asleep. */ 5111 5112 for (ntries = 0; ntries < 100; ntries++) { 5113 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5114 return; 5115 DELAY(10); 5116 } 5117 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5118 __func__); 5119 } 5120 5121 static void 5122 wpi_apm_stop(struct wpi_softc *sc) 5123 { 5124 wpi_apm_stop_master(sc); 5125 5126 /* Reset the entire device. */ 5127 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5128 DELAY(10); 5129 /* Clear "initialization complete" bit. */ 5130 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5131 } 5132 5133 static void 5134 wpi_nic_config(struct wpi_softc *sc) 5135 { 5136 uint32_t rev; 5137 5138 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5139 5140 /* voodoo from the Linux "driver".. */ 5141 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5142 if ((rev & 0xc0) == 0x40) 5143 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5144 else if (!(rev & 0x80)) 5145 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5146 5147 if (sc->cap == 0x80) 5148 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5149 5150 if ((sc->rev & 0xf0) == 0xd0) 5151 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5152 else 5153 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5154 5155 if (sc->type > 1) 5156 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5157 } 5158 5159 static int 5160 wpi_hw_init(struct wpi_softc *sc) 5161 { 5162 int chnl, ntries, error; 5163 5164 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5165 5166 /* Clear pending interrupts. */ 5167 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5168 5169 if ((error = wpi_apm_init(sc)) != 0) { 5170 device_printf(sc->sc_dev, 5171 "%s: could not power ON adapter, error %d\n", __func__, 5172 error); 5173 return error; 5174 } 5175 5176 /* Select VMAIN power source. */ 5177 if ((error = wpi_nic_lock(sc)) != 0) 5178 return error; 5179 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5180 wpi_nic_unlock(sc); 5181 /* Spin until VMAIN gets selected. */ 5182 for (ntries = 0; ntries < 5000; ntries++) { 5183 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5184 break; 5185 DELAY(10); 5186 } 5187 if (ntries == 5000) { 5188 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5189 return ETIMEDOUT; 5190 } 5191 5192 /* Perform adapter initialization. */ 5193 wpi_nic_config(sc); 5194 5195 /* Initialize RX ring. */ 5196 if ((error = wpi_nic_lock(sc)) != 0) 5197 return error; 5198 /* Set physical address of RX ring. */ 5199 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5200 /* Set physical address of RX read pointer. */ 5201 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5202 offsetof(struct wpi_shared, next)); 5203 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5204 /* Enable RX. 
*/ 5205 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5206 WPI_FH_RX_CONFIG_DMA_ENA | 5207 WPI_FH_RX_CONFIG_RDRBD_ENA | 5208 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5209 WPI_FH_RX_CONFIG_MAXFRAG | 5210 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5211 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5212 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5213 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5214 wpi_nic_unlock(sc); 5215 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5216 5217 /* Initialize TX rings. */ 5218 if ((error = wpi_nic_lock(sc)) != 0) 5219 return error; 5220 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5221 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5222 /* Enable all 6 TX rings. */ 5223 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5224 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5225 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5226 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5227 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5228 /* Set physical address of TX rings. */ 5229 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5230 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5231 5232 /* Enable all DMA channels. */ 5233 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5234 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5235 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5236 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5237 } 5238 wpi_nic_unlock(sc); 5239 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5240 5241 /* Clear "radio off" and "commands blocked" bits. */ 5242 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5243 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5244 5245 /* Clear pending interrupts. */ 5246 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5247 /* Enable interrupts. */ 5248 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5249 5250 /* _Really_ make sure "radio off" bit is cleared! */ 5251 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5252 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5253 5254 if ((error = wpi_load_firmware(sc)) != 0) { 5255 device_printf(sc->sc_dev, 5256 "%s: could not load firmware, error %d\n", __func__, 5257 error); 5258 return error; 5259 } 5260 /* Wait at most one second for firmware alive notification. */ 5261 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5262 device_printf(sc->sc_dev, 5263 "%s: timeout waiting for adapter to initialize, error %d\n", 5264 __func__, error); 5265 return error; 5266 } 5267 5268 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5269 5270 /* Do post-firmware initialization. */ 5271 return wpi_post_alive(sc); 5272 } 5273 5274 static void 5275 wpi_hw_stop(struct wpi_softc *sc) 5276 { 5277 int chnl, qid, ntries; 5278 5279 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5280 5281 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5282 wpi_nic_lock(sc); 5283 5284 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5285 5286 /* Disable interrupts. */ 5287 WPI_WRITE(sc, WPI_INT_MASK, 0); 5288 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5289 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5290 5291 /* Make sure we no longer hold the NIC lock. */ 5292 wpi_nic_unlock(sc); 5293 5294 if (wpi_nic_lock(sc) == 0) { 5295 /* Stop TX scheduler. */ 5296 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5297 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5298 5299 /* Stop all DMA channels. 
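 * Each channel is polled for up to 200 * 10us to report idle before moving
 * on.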
*/ 5300 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5301 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5302 for (ntries = 0; ntries < 200; ntries++) { 5303 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5304 WPI_FH_TX_STATUS_IDLE(chnl)) 5305 break; 5306 DELAY(10); 5307 } 5308 } 5309 wpi_nic_unlock(sc); 5310 } 5311 5312 /* Stop RX ring. */ 5313 wpi_reset_rx_ring(sc); 5314 5315 /* Reset all TX rings. */ 5316 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 5317 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5318 5319 if (wpi_nic_lock(sc) == 0) { 5320 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5321 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5322 wpi_nic_unlock(sc); 5323 } 5324 DELAY(5); 5325 /* Power OFF adapter. */ 5326 wpi_apm_stop(sc); 5327 } 5328 5329 static void 5330 wpi_radio_on(void *arg0, int pending) 5331 { 5332 struct wpi_softc *sc = arg0; 5333 struct ieee80211com *ic = &sc->sc_ic; 5334 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5335 5336 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5337 5338 WPI_LOCK(sc); 5339 callout_stop(&sc->watchdog_rfkill); 5340 WPI_UNLOCK(sc); 5341 5342 if (vap != NULL) 5343 ieee80211_init(vap); 5344 } 5345 5346 static void 5347 wpi_radio_off(void *arg0, int pending) 5348 { 5349 struct wpi_softc *sc = arg0; 5350 struct ieee80211com *ic = &sc->sc_ic; 5351 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5352 5353 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5354 5355 ieee80211_notify_radio(ic, 0); 5356 wpi_stop(sc); 5357 if (vap != NULL) 5358 ieee80211_stop(vap); 5359 5360 WPI_LOCK(sc); 5361 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5362 WPI_UNLOCK(sc); 5363 } 5364 5365 static int 5366 wpi_init(struct wpi_softc *sc) 5367 { 5368 int error = 0; 5369 5370 WPI_LOCK(sc); 5371 5372 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5373 5374 if (sc->sc_running != 0) 5375 goto end; 5376 5377 /* Check that the radio is not disabled by hardware switch. */ 5378 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5379 device_printf(sc->sc_dev, 5380 "RF switch: radio disabled (%s)\n", __func__); 5381 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5382 sc); 5383 error = EINPROGRESS; 5384 goto end; 5385 } 5386 5387 /* Read firmware images from the filesystem. */ 5388 if ((error = wpi_read_firmware(sc)) != 0) { 5389 device_printf(sc->sc_dev, 5390 "%s: could not read firmware, error %d\n", __func__, 5391 error); 5392 goto end; 5393 } 5394 5395 sc->sc_running = 1; 5396 5397 /* Initialize hardware and upload firmware. */ 5398 error = wpi_hw_init(sc); 5399 wpi_unload_firmware(sc); 5400 if (error != 0) { 5401 device_printf(sc->sc_dev, 5402 "%s: could not initialize hardware, error %d\n", __func__, 5403 error); 5404 goto fail; 5405 } 5406 5407 /* Configure adapter now that it is ready. 
*/ 5408 if ((error = wpi_config(sc)) != 0) { 5409 device_printf(sc->sc_dev, 5410 "%s: could not configure device, error %d\n", __func__, 5411 error); 5412 goto fail; 5413 } 5414 5415 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5416 5417 WPI_UNLOCK(sc); 5418 5419 return 0; 5420 5421 fail: wpi_stop_locked(sc); 5422 5423 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5424 WPI_UNLOCK(sc); 5425 5426 return error; 5427 } 5428 5429 static void 5430 wpi_stop_locked(struct wpi_softc *sc) 5431 { 5432 5433 WPI_LOCK_ASSERT(sc); 5434 5435 if (sc->sc_running == 0) 5436 return; 5437 5438 WPI_TX_LOCK(sc); 5439 WPI_TXQ_LOCK(sc); 5440 sc->sc_running = 0; 5441 WPI_TXQ_UNLOCK(sc); 5442 WPI_TX_UNLOCK(sc); 5443 5444 WPI_TXQ_STATE_LOCK(sc); 5445 callout_stop(&sc->tx_timeout); 5446 WPI_TXQ_STATE_UNLOCK(sc); 5447 5448 WPI_RXON_LOCK(sc); 5449 callout_stop(&sc->scan_timeout); 5450 callout_stop(&sc->calib_to); 5451 WPI_RXON_UNLOCK(sc); 5452 5453 /* Power OFF hardware. */ 5454 wpi_hw_stop(sc); 5455 } 5456 5457 static void 5458 wpi_stop(struct wpi_softc *sc) 5459 { 5460 WPI_LOCK(sc); 5461 wpi_stop_locked(sc); 5462 WPI_UNLOCK(sc); 5463 } 5464 5465 /* 5466 * Callback from net80211 to start a scan. 5467 */ 5468 static void 5469 wpi_scan_start(struct ieee80211com *ic) 5470 { 5471 struct wpi_softc *sc = ic->ic_softc; 5472 5473 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5474 } 5475 5476 /* 5477 * Callback from net80211 to terminate a scan. 5478 */ 5479 static void 5480 wpi_scan_end(struct ieee80211com *ic) 5481 { 5482 struct wpi_softc *sc = ic->ic_softc; 5483 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5484 5485 if (vap->iv_state == IEEE80211_S_RUN) 5486 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5487 } 5488 5489 /** 5490 * Called by the net80211 framework to indicate to the driver 5491 * that the channel should be changed. 5492 */ 5493 static void 5494 wpi_set_channel(struct ieee80211com *ic) 5495 { 5496 const struct ieee80211_channel *c = ic->ic_curchan; 5497 struct wpi_softc *sc = ic->ic_softc; 5498 int error; 5499 5500 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5501 5502 WPI_LOCK(sc); 5503 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5504 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5505 WPI_UNLOCK(sc); 5506 WPI_TX_LOCK(sc); 5507 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5508 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5509 WPI_TX_UNLOCK(sc); 5510 5511 /* 5512 * Only need to set the channel in Monitor mode. AP scanning and auth 5513 * are already taken care of by their respective firmware commands. 5514 */ 5515 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5516 WPI_RXON_LOCK(sc); 5517 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5518 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5519 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5520 WPI_RXON_24GHZ); 5521 } else { 5522 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5523 WPI_RXON_24GHZ); 5524 } 5525 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5526 device_printf(sc->sc_dev, 5527 "%s: error %d setting channel\n", __func__, 5528 error); 5529 WPI_RXON_UNLOCK(sc); 5530 } 5531 } 5532 5533 /** 5534 * Called by net80211 to indicate that we need to scan the current 5535 * channel. The channel was previously set via the wpi_set_channel 5536 * callback.
5537 */ 5538 static void 5539 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5540 { 5541 struct ieee80211vap *vap = ss->ss_vap; 5542 struct ieee80211com *ic = vap->iv_ic; 5543 struct wpi_softc *sc = ic->ic_softc; 5544 int error; 5545 5546 WPI_RXON_LOCK(sc); 5547 error = wpi_scan(sc, ic->ic_curchan); 5548 WPI_RXON_UNLOCK(sc); 5549 if (error != 0) 5550 ieee80211_cancel_scan(vap); 5551 } 5552 5553 /** 5554 * Called by the net80211 framework to indicate that the minimum dwell time 5555 * has been met and the scan should be terminated. 5556 * We don't actually terminate the scan, as the firmware will notify 5557 * us when it's finished and we have no way to interrupt it. 5558 */ 5559 static void 5560 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5561 { 5562 /* NB: don't try to abort scan; wait for firmware to finish */ 5563 } 5564 5565 static void 5566 wpi_hw_reset(void *arg, int pending) 5567 { 5568 struct wpi_softc *sc = arg; 5569 struct ieee80211com *ic = &sc->sc_ic; 5570 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5571 5572 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5573 5574 ieee80211_notify_radio(ic, 0); 5575 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN)) 5576 ieee80211_cancel_scan(vap); 5577 5578 wpi_stop(sc); 5579 if (vap != NULL) { 5580 ieee80211_stop(vap); 5581 ieee80211_init(vap); 5582 } 5583 } 5584