/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter does not work the way most other adapters do.
 * Instead, at run time the card is brought into a known state and told to
 * load boot firmware.  The boot firmware loads an init and a main binary
 * firmware image into SRAM on the card via DMA.  Once the firmware is
 * loaded, the driver and the hardware communicate with the firmware through
 * circular DMA rings backed by that SRAM.
 *
 * There are six memory rings: one command ring, one RX data ring and four
 * TX data rings.  The four TX data rings allow for QoS prioritization.
 *
 * The RX data ring consists of 32 DMA buffers.  Two registers indicate how
 * far into the ring the driver and the firmware have progressed.  The
 * driver sets the initial read index (reg1) and the initial write index
 * (reg2); the firmware updates the read index (reg1) when a packet is
 * received and fires an interrupt.  The driver then processes the buffers
 * starting at reg1, telling the firmware which buffers have been consumed
 * by updating reg2, and allocates new memory for each processed buffer as
 * it goes.
 *
 * A similar scheme is used for the TX rings.  The difference is that the
 * firmware stops processing buffers once a queue is full and resumes only
 * after confirmation of a successful transmission (tx_done) has occurred.
 *
 * The command ring operates in the same manner as the TX queues.
 *
 * All communication directly with the card (i.e. the EEPROM) is classed as
 * Stage 1 communication.
 *
 * All communication with the card via the firmware is classed as Stage 2.
 * The firmware consists of two parts: a bootstrap firmware and a runtime
 * firmware.  The bootstrap and runtime firmware are loaded from host memory
 * via DMA to the card and then told to execute.  From this point on the
 * majority of communication between the driver and the card goes
 * via the firmware.
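 *
 * As a rough illustration of the RX handshake described above (pseudo-code
 * only; the real work is done in wpi_rx_done() and wpi_update_rx_ring(),
 * and the register names differ):
 *
 *	while (read_idx != idx_written_by_firmware) {
 *		process_buffer(read_idx);
 *		replenish_buffer(read_idx);
 *		read_idx = (read_idx + 1) % ring_size;
 *	}
 *	write_read_idx_back(read_idx);	-- firmware may reuse those slots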
58 */ 59 60 #include "opt_wlan.h" 61 #include "opt_wpi.h" 62 63 #include <sys/param.h> 64 #include <sys/sysctl.h> 65 #include <sys/sockio.h> 66 #include <sys/mbuf.h> 67 #include <sys/kernel.h> 68 #include <sys/socket.h> 69 #include <sys/systm.h> 70 #include <sys/malloc.h> 71 #include <sys/queue.h> 72 #include <sys/taskqueue.h> 73 #include <sys/module.h> 74 #include <sys/bus.h> 75 #include <sys/endian.h> 76 #include <sys/linker.h> 77 #include <sys/firmware.h> 78 79 #include <machine/bus.h> 80 #include <machine/resource.h> 81 #include <sys/rman.h> 82 83 #include <dev/pci/pcireg.h> 84 #include <dev/pci/pcivar.h> 85 86 #include <net/bpf.h> 87 #include <net/if.h> 88 #include <net/if_var.h> 89 #include <net/if_arp.h> 90 #include <net/ethernet.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/if_types.h> 94 95 #include <netinet/in.h> 96 #include <netinet/in_systm.h> 97 #include <netinet/in_var.h> 98 #include <netinet/if_ether.h> 99 #include <netinet/ip.h> 100 101 #include <net80211/ieee80211_var.h> 102 #include <net80211/ieee80211_radiotap.h> 103 #include <net80211/ieee80211_regdomain.h> 104 #include <net80211/ieee80211_ratectl.h> 105 106 #include <dev/wpi/if_wpireg.h> 107 #include <dev/wpi/if_wpivar.h> 108 #include <dev/wpi/if_wpi_debug.h> 109 110 struct wpi_ident { 111 uint16_t vendor; 112 uint16_t device; 113 uint16_t subdevice; 114 const char *name; 115 }; 116 117 static const struct wpi_ident wpi_ident_table[] = { 118 /* The below entries support ABG regardless of the subid */ 119 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 120 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 /* The below entries only support BG */ 122 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 123 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0, 0, 0, NULL } 127 }; 128 129 static int wpi_probe(device_t); 130 static int wpi_attach(device_t); 131 static void wpi_radiotap_attach(struct wpi_softc *); 132 static void wpi_sysctlattach(struct wpi_softc *); 133 static void wpi_init_beacon(struct wpi_vap *); 134 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 135 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 136 const uint8_t [IEEE80211_ADDR_LEN], 137 const uint8_t [IEEE80211_ADDR_LEN]); 138 static void wpi_vap_delete(struct ieee80211vap *); 139 static int wpi_detach(device_t); 140 static int wpi_shutdown(device_t); 141 static int wpi_suspend(device_t); 142 static int wpi_resume(device_t); 143 static int wpi_nic_lock(struct wpi_softc *); 144 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 145 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 146 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 147 void **, bus_size_t, bus_size_t); 148 static void wpi_dma_contig_free(struct wpi_dma_info *); 149 static int wpi_alloc_shared(struct wpi_softc *); 150 static void wpi_free_shared(struct wpi_softc *); 151 static int wpi_alloc_fwmem(struct wpi_softc *); 152 static void wpi_free_fwmem(struct wpi_softc *); 153 static int wpi_alloc_rx_ring(struct wpi_softc *); 154 static void wpi_update_rx_ring(struct wpi_softc *); 155 static void wpi_update_rx_ring_ps(struct wpi_softc *); 156 static void wpi_reset_rx_ring(struct wpi_softc *); 157 static void wpi_free_rx_ring(struct wpi_softc *); 158 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 159 int); 160 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_update_tx_ring_ps(struct wpi_softc *, 162 struct wpi_tx_ring *); 163 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 164 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static int wpi_read_eeprom(struct wpi_softc *, 166 uint8_t macaddr[IEEE80211_ADDR_LEN]); 167 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 168 static void wpi_read_eeprom_band(struct wpi_softc *, int); 169 static int wpi_read_eeprom_channels(struct wpi_softc *, int); 170 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 171 struct ieee80211_channel *); 172 static int wpi_setregdomain(struct ieee80211com *, 173 struct ieee80211_regdomain *, int, 174 struct ieee80211_channel[]); 175 static int wpi_read_eeprom_group(struct wpi_softc *, int); 176 static int wpi_add_node_entry_adhoc(struct wpi_softc *); 177 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 178 const uint8_t mac[IEEE80211_ADDR_LEN]); 179 static void wpi_node_free(struct ieee80211_node *); 180 static void wpi_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 181 const struct ieee80211_rx_stats *, 182 int, int); 183 static void wpi_restore_node(void *, struct ieee80211_node *); 184 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 185 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 186 static void wpi_calib_timeout(void *); 187 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 188 struct wpi_rx_data *); 189 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 190 struct wpi_rx_data *); 191 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 192 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 193 static void wpi_notif_intr(struct wpi_softc *); 194 static void wpi_wakeup_intr(struct wpi_softc *); 195 #ifdef WPI_DEBUG 196 static void wpi_debug_registers(struct wpi_softc *); 197 #endif 198 static void wpi_fatal_intr(struct wpi_softc *); 199 static void wpi_intr(void *); 200 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 201 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 202 struct ieee80211_node *); 203 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 204 struct ieee80211_node *, 205 const struct ieee80211_bpf_params *); 206 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 207 const struct ieee80211_bpf_params *); 208 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 209 static void wpi_start(void *, int); 210 static void wpi_watchdog_rfkill(void *); 211 static void wpi_scan_timeout(void *); 212 static void wpi_tx_timeout(void *); 213 static void wpi_parent(struct ieee80211com *); 214 static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 215 static int wpi_mrr_setup(struct wpi_softc *); 216 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 217 static int wpi_add_broadcast_node(struct wpi_softc *, int); 218 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 219 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 220 static int wpi_updateedca(struct ieee80211com *); 221 static void wpi_set_promisc(struct wpi_softc *); 222 static void wpi_update_promisc(struct ieee80211com *); 223 static void wpi_update_mcast(struct ieee80211com *); 224 static void 
wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 225 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 226 static void wpi_power_calibration(struct wpi_softc *); 227 static int wpi_set_txpower(struct wpi_softc *, int); 228 static int wpi_get_power_index(struct wpi_softc *, 229 struct wpi_power_group *, uint8_t, int, int); 230 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 231 static int wpi_send_btcoex(struct wpi_softc *); 232 static int wpi_send_rxon(struct wpi_softc *, int, int); 233 static int wpi_config(struct wpi_softc *); 234 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 235 struct ieee80211_channel *, uint8_t); 236 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 237 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 238 struct ieee80211_channel *); 239 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 240 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 241 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 242 static int wpi_config_beacon(struct wpi_vap *); 243 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 244 static void wpi_update_beacon(struct ieee80211vap *, int); 245 static void wpi_newassoc(struct ieee80211_node *, int); 246 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 247 static int wpi_load_key(struct ieee80211_node *, 248 const struct ieee80211_key *); 249 static void wpi_load_key_cb(void *, struct ieee80211_node *); 250 static int wpi_set_global_keys(struct ieee80211_node *); 251 static int wpi_del_key(struct ieee80211_node *, 252 const struct ieee80211_key *); 253 static void wpi_del_key_cb(void *, struct ieee80211_node *); 254 static int wpi_process_key(struct ieee80211vap *, 255 const struct ieee80211_key *, int); 256 static int wpi_key_set(struct ieee80211vap *, 257 const struct ieee80211_key *, 258 const uint8_t mac[IEEE80211_ADDR_LEN]); 259 static int wpi_key_delete(struct ieee80211vap *, 260 const struct ieee80211_key *); 261 static int wpi_post_alive(struct wpi_softc *); 262 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 263 static int wpi_load_firmware(struct wpi_softc *); 264 static int wpi_read_firmware(struct wpi_softc *); 265 static void wpi_unload_firmware(struct wpi_softc *); 266 static int wpi_clock_wait(struct wpi_softc *); 267 static int wpi_apm_init(struct wpi_softc *); 268 static void wpi_apm_stop_master(struct wpi_softc *); 269 static void wpi_apm_stop(struct wpi_softc *); 270 static void wpi_nic_config(struct wpi_softc *); 271 static int wpi_hw_init(struct wpi_softc *); 272 static void wpi_hw_stop(struct wpi_softc *); 273 static void wpi_radio_on(void *, int); 274 static void wpi_radio_off(void *, int); 275 static int wpi_init(struct wpi_softc *); 276 static void wpi_stop_locked(struct wpi_softc *); 277 static void wpi_stop(struct wpi_softc *); 278 static void wpi_scan_start(struct ieee80211com *); 279 static void wpi_scan_end(struct ieee80211com *); 280 static void wpi_set_channel(struct ieee80211com *); 281 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 282 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 283 static void wpi_hw_reset(void *, int); 284 285 static device_method_t wpi_methods[] = { 286 /* Device interface */ 287 DEVMETHOD(device_probe, wpi_probe), 288 DEVMETHOD(device_attach, wpi_attach), 289 DEVMETHOD(device_detach, wpi_detach), 290 DEVMETHOD(device_shutdown, wpi_shutdown), 291 
	DEVMETHOD(device_suspend, wpi_suspend),
	DEVMETHOD(device_resume, wpi_resume),

	DEVMETHOD_END
};

static driver_t wpi_driver = {
	"wpi",
	wpi_methods,
	sizeof (struct wpi_softc)
};
static devclass_t wpi_devclass;

DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);

MODULE_VERSION(wpi, 1);

MODULE_DEPEND(wpi, pci, 1, 1, 1);
MODULE_DEPEND(wpi, wlan, 1, 1, 1);
MODULE_DEPEND(wpi, firmware, 1, 1, 1);

static int
wpi_probe(device_t dev)
{
	const struct wpi_ident *ident;

	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
		if (pci_get_vendor(dev) == ident->vendor &&
		    pci_get_device(dev) == ident->device) {
			device_set_desc(dev, ident->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return ENXIO;
}

static int
wpi_attach(device_t dev)
{
	struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
	struct ieee80211com *ic;
	int i, error, rid;
#ifdef WPI_DEBUG
	int supportsa = 1;
	const struct wpi_ident *ident;
#endif

	sc->sc_dev = dev;

#ifdef WPI_DEBUG
	error = resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
	if (error != 0)
		sc->sc_debug = 0;
#else
	sc->sc_debug = 0;
#endif

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return error;
	}

	/*
	 * Some cards only support 802.11b/g, not 802.11a; check whether
	 * this is one such card.  A 0x0 in the subdevice table indicates
	 * that the entire subdevice range is to be ignored.
	 */
#ifdef WPI_DEBUG
	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
		if (ident->subdevice &&
		    pci_get_subdevice(dev) == ident->subdevice) {
			supportsa = 0;
			break;
		}
	}
#endif

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "can't map mem space\n");
		return ENOMEM;
	}
	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	i = 1;
	rid = 0;
	if (pci_alloc_msi(dev, &i) == 0)
		rid = 1;
	/* Install interrupt handler. */
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		error = ENOMEM;
		goto fail;
	}

	WPI_LOCK_INIT(sc);
	WPI_TX_LOCK_INIT(sc);
	WPI_RXON_LOCK_INIT(sc);
	WPI_NT_LOCK_INIT(sc);
	WPI_TXQ_LOCK_INIT(sc);
	WPI_TXQ_STATE_LOCK_INIT(sc);

	/* Allocate DMA memory for firmware transfers. */
	if ((error = wpi_alloc_fwmem(sc)) != 0) {
		device_printf(dev,
		    "could not allocate memory for firmware, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate shared page.
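	 * This is 4KB-aligned host memory that the firmware reads; among
	 * other things, the TX ring base addresses are published here (see
	 * wpi_alloc_tx_ring()).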
*/ 421 if ((error = wpi_alloc_shared(sc)) != 0) { 422 device_printf(dev, "could not allocate shared page\n"); 423 goto fail; 424 } 425 426 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 427 for (i = 0; i < WPI_NTXQUEUES; i++) { 428 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 429 device_printf(dev, 430 "could not allocate TX ring %d, error %d\n", i, 431 error); 432 goto fail; 433 } 434 } 435 436 /* Allocate RX ring. */ 437 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 438 device_printf(dev, "could not allocate RX ring, error %d\n", 439 error); 440 goto fail; 441 } 442 443 /* Clear pending interrupts. */ 444 WPI_WRITE(sc, WPI_INT, 0xffffffff); 445 446 ic = &sc->sc_ic; 447 ic->ic_softc = sc; 448 ic->ic_name = device_get_nameunit(dev); 449 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 450 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 451 452 /* Set device capabilities. */ 453 ic->ic_caps = 454 IEEE80211_C_STA /* station mode supported */ 455 | IEEE80211_C_IBSS /* IBSS mode supported */ 456 | IEEE80211_C_HOSTAP /* Host access point mode */ 457 | IEEE80211_C_MONITOR /* monitor mode supported */ 458 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 459 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 460 | IEEE80211_C_TXPMGT /* tx power management */ 461 | IEEE80211_C_SHSLOT /* short slot time supported */ 462 | IEEE80211_C_WPA /* 802.11i */ 463 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 464 | IEEE80211_C_WME /* 802.11e */ 465 | IEEE80211_C_PMGT /* Station-side power mgmt */ 466 ; 467 468 ic->ic_cryptocaps = 469 IEEE80211_CRYPTO_AES_CCM; 470 471 /* 472 * Read in the eeprom and also setup the channels for 473 * net80211. We don't set the rates as net80211 does this for us 474 */ 475 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 476 device_printf(dev, "could not read EEPROM, error %d\n", 477 error); 478 goto fail; 479 } 480 481 #ifdef WPI_DEBUG 482 if (bootverbose) { 483 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 484 sc->domain); 485 device_printf(sc->sc_dev, "Hardware Type: %c\n", 486 sc->type > 1 ? 'B': '?'); 487 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 488 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 489 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 490 supportsa ? "does" : "does not"); 491 492 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 493 check what sc->rev really represents - benjsc 20070615 */ 494 } 495 #endif 496 497 ieee80211_ifattach(ic); 498 ic->ic_vap_create = wpi_vap_create; 499 ic->ic_vap_delete = wpi_vap_delete; 500 ic->ic_parent = wpi_parent; 501 ic->ic_raw_xmit = wpi_raw_xmit; 502 ic->ic_transmit = wpi_transmit; 503 ic->ic_node_alloc = wpi_node_alloc; 504 sc->sc_node_free = ic->ic_node_free; 505 ic->ic_node_free = wpi_node_free; 506 ic->ic_wme.wme_update = wpi_updateedca; 507 ic->ic_update_promisc = wpi_update_promisc; 508 ic->ic_update_mcast = wpi_update_mcast; 509 ic->ic_newassoc = wpi_newassoc; 510 ic->ic_scan_start = wpi_scan_start; 511 ic->ic_scan_end = wpi_scan_end; 512 ic->ic_set_channel = wpi_set_channel; 513 ic->ic_scan_curchan = wpi_scan_curchan; 514 ic->ic_scan_mindwell = wpi_scan_mindwell; 515 ic->ic_setregdomain = wpi_setregdomain; 516 517 sc->sc_update_rx_ring = wpi_update_rx_ring; 518 sc->sc_update_tx_ring = wpi_update_tx_ring; 519 520 wpi_radiotap_attach(sc); 521 522 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 523 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 524 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 525 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 526 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 527 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 528 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 529 TASK_INIT(&sc->sc_start_task, 0, wpi_start, sc); 530 531 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 532 taskqueue_thread_enqueue, &sc->sc_tq); 533 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 534 if (error != 0) { 535 device_printf(dev, "can't start threads, error %d\n", error); 536 goto fail; 537 } 538 539 wpi_sysctlattach(sc); 540 541 /* 542 * Hook our interrupt after all initialization is complete. 543 */ 544 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 545 NULL, wpi_intr, sc, &sc->sc_ih); 546 if (error != 0) { 547 device_printf(dev, "can't establish interrupt, error %d\n", 548 error); 549 goto fail; 550 } 551 552 if (bootverbose) 553 ieee80211_announce(ic); 554 555 #ifdef WPI_DEBUG 556 if (sc->sc_debug & WPI_DEBUG_HW) 557 ieee80211_announce_channels(ic); 558 #endif 559 560 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 561 return 0; 562 563 fail: wpi_detach(dev); 564 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 565 return error; 566 } 567 568 /* 569 * Attach the interface to 802.11 radiotap. 
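 * This registers the driver's TX and RX radiotap headers with net80211 so
 * that bpf(4) listeners receive per-frame radio information (rate, RSSI,
 * channel, etc.) along with captured frames.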
570 */ 571 static void 572 wpi_radiotap_attach(struct wpi_softc *sc) 573 { 574 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 575 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 576 577 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 578 ieee80211_radiotap_attach(&sc->sc_ic, 579 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 580 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 581 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 582 } 583 584 static void 585 wpi_sysctlattach(struct wpi_softc *sc) 586 { 587 #ifdef WPI_DEBUG 588 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 589 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 590 591 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 592 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 593 "control debugging printfs"); 594 #endif 595 } 596 597 static void 598 wpi_init_beacon(struct wpi_vap *wvp) 599 { 600 struct wpi_buf *bcn = &wvp->wv_bcbuf; 601 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 602 603 cmd->id = WPI_ID_BROADCAST; 604 cmd->ofdm_mask = 0xff; 605 cmd->cck_mask = 0x0f; 606 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 607 608 /* 609 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 610 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 611 */ 612 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 613 614 bcn->code = WPI_CMD_SET_BEACON; 615 bcn->ac = WPI_CMD_QUEUE_NUM; 616 bcn->size = sizeof(struct wpi_cmd_beacon); 617 } 618 619 static struct ieee80211vap * 620 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 621 enum ieee80211_opmode opmode, int flags, 622 const uint8_t bssid[IEEE80211_ADDR_LEN], 623 const uint8_t mac[IEEE80211_ADDR_LEN]) 624 { 625 struct wpi_vap *wvp; 626 struct ieee80211vap *vap; 627 628 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 629 return NULL; 630 631 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 632 vap = &wvp->wv_vap; 633 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 634 635 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 636 WPI_VAP_LOCK_INIT(wvp); 637 wpi_init_beacon(wvp); 638 } 639 640 /* Override with driver methods. */ 641 vap->iv_key_set = wpi_key_set; 642 vap->iv_key_delete = wpi_key_delete; 643 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 644 vap->iv_recv_mgmt = wpi_recv_mgmt; 645 wvp->wv_newstate = vap->iv_newstate; 646 vap->iv_newstate = wpi_newstate; 647 vap->iv_update_beacon = wpi_update_beacon; 648 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 649 650 ieee80211_ratectl_init(vap); 651 /* Complete setup. 
*/ 652 ieee80211_vap_attach(vap, ieee80211_media_change, 653 ieee80211_media_status, mac); 654 ic->ic_opmode = opmode; 655 return vap; 656 } 657 658 static void 659 wpi_vap_delete(struct ieee80211vap *vap) 660 { 661 struct wpi_vap *wvp = WPI_VAP(vap); 662 struct wpi_buf *bcn = &wvp->wv_bcbuf; 663 enum ieee80211_opmode opmode = vap->iv_opmode; 664 665 ieee80211_ratectl_deinit(vap); 666 ieee80211_vap_detach(vap); 667 668 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 669 if (bcn->m != NULL) 670 m_freem(bcn->m); 671 672 WPI_VAP_LOCK_DESTROY(wvp); 673 } 674 675 free(wvp, M_80211_VAP); 676 } 677 678 static int 679 wpi_detach(device_t dev) 680 { 681 struct wpi_softc *sc = device_get_softc(dev); 682 struct ieee80211com *ic = &sc->sc_ic; 683 int qid; 684 685 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 686 687 if (ic->ic_vap_create == wpi_vap_create) { 688 ieee80211_draintask(ic, &sc->sc_radioon_task); 689 ieee80211_draintask(ic, &sc->sc_start_task); 690 691 wpi_stop(sc); 692 693 if (sc->sc_tq != NULL) { 694 taskqueue_drain_all(sc->sc_tq); 695 taskqueue_free(sc->sc_tq); 696 } 697 698 callout_drain(&sc->watchdog_rfkill); 699 callout_drain(&sc->tx_timeout); 700 callout_drain(&sc->scan_timeout); 701 callout_drain(&sc->calib_to); 702 ieee80211_ifdetach(ic); 703 } 704 705 /* Uninstall interrupt handler. */ 706 if (sc->irq != NULL) { 707 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 708 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 709 sc->irq); 710 pci_release_msi(dev); 711 } 712 713 if (sc->txq[0].data_dmat) { 714 /* Free DMA resources. */ 715 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 716 wpi_free_tx_ring(sc, &sc->txq[qid]); 717 718 wpi_free_rx_ring(sc); 719 wpi_free_shared(sc); 720 } 721 722 if (sc->fw_dma.tag) 723 wpi_free_fwmem(sc); 724 725 if (sc->mem != NULL) 726 bus_release_resource(dev, SYS_RES_MEMORY, 727 rman_get_rid(sc->mem), sc->mem); 728 729 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 730 WPI_TXQ_STATE_LOCK_DESTROY(sc); 731 WPI_TXQ_LOCK_DESTROY(sc); 732 WPI_NT_LOCK_DESTROY(sc); 733 WPI_RXON_LOCK_DESTROY(sc); 734 WPI_TX_LOCK_DESTROY(sc); 735 WPI_LOCK_DESTROY(sc); 736 return 0; 737 } 738 739 static int 740 wpi_shutdown(device_t dev) 741 { 742 struct wpi_softc *sc = device_get_softc(dev); 743 744 wpi_stop(sc); 745 return 0; 746 } 747 748 static int 749 wpi_suspend(device_t dev) 750 { 751 struct wpi_softc *sc = device_get_softc(dev); 752 struct ieee80211com *ic = &sc->sc_ic; 753 754 ieee80211_suspend_all(ic); 755 return 0; 756 } 757 758 static int 759 wpi_resume(device_t dev) 760 { 761 struct wpi_softc *sc = device_get_softc(dev); 762 struct ieee80211com *ic = &sc->sc_ic; 763 764 /* Clear device-specific "PCI retry timeout" register (41h). */ 765 pci_write_config(dev, 0x41, 0, 1); 766 767 ieee80211_resume_all(ic); 768 return 0; 769 } 770 771 /* 772 * Grab exclusive access to NIC memory. 773 */ 774 static int 775 wpi_nic_lock(struct wpi_softc *sc) 776 { 777 int ntries; 778 779 /* Request exclusive access to NIC. */ 780 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 781 782 /* Spin until we actually get the lock. */ 783 for (ntries = 0; ntries < 1000; ntries++) { 784 if ((WPI_READ(sc, WPI_GP_CNTRL) & 785 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 786 WPI_GP_CNTRL_MAC_ACCESS_ENA) 787 return 0; 788 DELAY(10); 789 } 790 791 device_printf(sc->sc_dev, "could not lock memory\n"); 792 793 return ETIMEDOUT; 794 } 795 796 /* 797 * Release lock on NIC memory. 
798 */ 799 static __inline void 800 wpi_nic_unlock(struct wpi_softc *sc) 801 { 802 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 803 } 804 805 static __inline uint32_t 806 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 807 { 808 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 809 WPI_BARRIER_READ_WRITE(sc); 810 return WPI_READ(sc, WPI_PRPH_RDATA); 811 } 812 813 static __inline void 814 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 815 { 816 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 817 WPI_BARRIER_WRITE(sc); 818 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 819 } 820 821 static __inline void 822 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 823 { 824 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 825 } 826 827 static __inline void 828 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 829 { 830 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 831 } 832 833 static __inline void 834 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 835 const uint32_t *data, int count) 836 { 837 for (; count > 0; count--, data++, addr += 4) 838 wpi_prph_write(sc, addr, *data); 839 } 840 841 static __inline uint32_t 842 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 843 { 844 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 845 WPI_BARRIER_READ_WRITE(sc); 846 return WPI_READ(sc, WPI_MEM_RDATA); 847 } 848 849 static __inline void 850 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 851 int count) 852 { 853 for (; count > 0; count--, addr += 4) 854 *data++ = wpi_mem_read(sc, addr); 855 } 856 857 static int 858 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 859 { 860 uint8_t *out = data; 861 uint32_t val; 862 int error, ntries; 863 864 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 865 866 if ((error = wpi_nic_lock(sc)) != 0) 867 return error; 868 869 for (; count > 0; count -= 2, addr++) { 870 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 871 for (ntries = 0; ntries < 10; ntries++) { 872 val = WPI_READ(sc, WPI_EEPROM); 873 if (val & WPI_EEPROM_READ_VALID) 874 break; 875 DELAY(5); 876 } 877 if (ntries == 10) { 878 device_printf(sc->sc_dev, 879 "timeout reading ROM at 0x%x\n", addr); 880 return ETIMEDOUT; 881 } 882 *out++= val >> 16; 883 if (count > 1) 884 *out ++= val >> 24; 885 } 886 887 wpi_nic_unlock(sc); 888 889 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 890 891 return 0; 892 } 893 894 static void 895 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 896 { 897 if (error != 0) 898 return; 899 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 900 *(bus_addr_t *)arg = segs[0].ds_addr; 901 } 902 903 /* 904 * Allocates a contiguous block of dma memory of the requested size and 905 * alignment. 
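 * This follows the usual busdma sequence: create a tag, allocate the
 * memory, then load the map to obtain the bus address.  On success the
 * kernel virtual address is returned through *kvap (when non-NULL) and the
 * bus address is left in dma->paddr; wpi_dma_contig_free() releases it all.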
906 */ 907 static int 908 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 909 void **kvap, bus_size_t size, bus_size_t alignment) 910 { 911 int error; 912 913 dma->tag = NULL; 914 dma->size = size; 915 916 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 917 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 918 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 919 if (error != 0) 920 goto fail; 921 922 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 923 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 924 if (error != 0) 925 goto fail; 926 927 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 928 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 929 if (error != 0) 930 goto fail; 931 932 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 933 934 if (kvap != NULL) 935 *kvap = dma->vaddr; 936 937 return 0; 938 939 fail: wpi_dma_contig_free(dma); 940 return error; 941 } 942 943 static void 944 wpi_dma_contig_free(struct wpi_dma_info *dma) 945 { 946 if (dma->vaddr != NULL) { 947 bus_dmamap_sync(dma->tag, dma->map, 948 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 949 bus_dmamap_unload(dma->tag, dma->map); 950 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 951 dma->vaddr = NULL; 952 } 953 if (dma->tag != NULL) { 954 bus_dma_tag_destroy(dma->tag); 955 dma->tag = NULL; 956 } 957 } 958 959 /* 960 * Allocate a shared page between host and NIC. 961 */ 962 static int 963 wpi_alloc_shared(struct wpi_softc *sc) 964 { 965 /* Shared buffer must be aligned on a 4KB boundary. */ 966 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 967 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 968 } 969 970 static void 971 wpi_free_shared(struct wpi_softc *sc) 972 { 973 wpi_dma_contig_free(&sc->shared_dma); 974 } 975 976 /* 977 * Allocate DMA-safe memory for firmware transfer. 978 */ 979 static int 980 wpi_alloc_fwmem(struct wpi_softc *sc) 981 { 982 /* Must be aligned on a 16-byte boundary. */ 983 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 984 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 985 } 986 987 static void 988 wpi_free_fwmem(struct wpi_softc *sc) 989 { 990 wpi_dma_contig_free(&sc->fw_dma); 991 } 992 993 static int 994 wpi_alloc_rx_ring(struct wpi_softc *sc) 995 { 996 struct wpi_rx_ring *ring = &sc->rxq; 997 bus_size_t size; 998 int i, error; 999 1000 ring->cur = 0; 1001 ring->update = 0; 1002 1003 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1004 1005 /* Allocate RX descriptors (16KB aligned.) */ 1006 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1007 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1008 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1009 if (error != 0) { 1010 device_printf(sc->sc_dev, 1011 "%s: could not allocate RX ring DMA memory, error %d\n", 1012 __func__, error); 1013 goto fail; 1014 } 1015 1016 /* Create RX buffer DMA tag. */ 1017 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1018 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1019 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1020 &ring->data_dmat); 1021 if (error != 0) { 1022 device_printf(sc->sc_dev, 1023 "%s: could not create RX buf DMA tag, error %d\n", 1024 __func__, error); 1025 goto fail; 1026 } 1027 1028 /* 1029 * Allocate and map RX buffers. 
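	 * Each of the WPI_RX_RING_COUNT slots gets its own DMA map and a
	 * page-sized (MJUMPAGESIZE) mbuf; the bus address of each mbuf is
	 * written into the matching RX descriptor so the firmware knows
	 * where to DMA incoming frames.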
1030 */ 1031 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1032 struct wpi_rx_data *data = &ring->data[i]; 1033 bus_addr_t paddr; 1034 1035 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1036 if (error != 0) { 1037 device_printf(sc->sc_dev, 1038 "%s: could not create RX buf DMA map, error %d\n", 1039 __func__, error); 1040 goto fail; 1041 } 1042 1043 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1044 if (data->m == NULL) { 1045 device_printf(sc->sc_dev, 1046 "%s: could not allocate RX mbuf\n", __func__); 1047 error = ENOBUFS; 1048 goto fail; 1049 } 1050 1051 error = bus_dmamap_load(ring->data_dmat, data->map, 1052 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1053 &paddr, BUS_DMA_NOWAIT); 1054 if (error != 0 && error != EFBIG) { 1055 device_printf(sc->sc_dev, 1056 "%s: can't map mbuf (error %d)\n", __func__, 1057 error); 1058 goto fail; 1059 } 1060 1061 /* Set physical address of RX buffer. */ 1062 ring->desc[i] = htole32(paddr); 1063 } 1064 1065 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1066 BUS_DMASYNC_PREWRITE); 1067 1068 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1069 1070 return 0; 1071 1072 fail: wpi_free_rx_ring(sc); 1073 1074 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1075 1076 return error; 1077 } 1078 1079 static void 1080 wpi_update_rx_ring(struct wpi_softc *sc) 1081 { 1082 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1083 } 1084 1085 static void 1086 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1087 { 1088 struct wpi_rx_ring *ring = &sc->rxq; 1089 1090 if (ring->update != 0) { 1091 /* Wait for INT_WAKEUP event. */ 1092 return; 1093 } 1094 1095 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1096 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1097 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1098 __func__); 1099 ring->update = 1; 1100 } else { 1101 wpi_update_rx_ring(sc); 1102 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1103 } 1104 } 1105 1106 static void 1107 wpi_reset_rx_ring(struct wpi_softc *sc) 1108 { 1109 struct wpi_rx_ring *ring = &sc->rxq; 1110 int ntries; 1111 1112 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1113 1114 if (wpi_nic_lock(sc) == 0) { 1115 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1116 for (ntries = 0; ntries < 1000; ntries++) { 1117 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1118 WPI_FH_RX_STATUS_IDLE) 1119 break; 1120 DELAY(10); 1121 } 1122 wpi_nic_unlock(sc); 1123 } 1124 1125 ring->cur = 0; 1126 ring->update = 0; 1127 } 1128 1129 static void 1130 wpi_free_rx_ring(struct wpi_softc *sc) 1131 { 1132 struct wpi_rx_ring *ring = &sc->rxq; 1133 int i; 1134 1135 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1136 1137 wpi_dma_contig_free(&ring->desc_dma); 1138 1139 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1140 struct wpi_rx_data *data = &ring->data[i]; 1141 1142 if (data->m != NULL) { 1143 bus_dmamap_sync(ring->data_dmat, data->map, 1144 BUS_DMASYNC_POSTREAD); 1145 bus_dmamap_unload(ring->data_dmat, data->map); 1146 m_freem(data->m); 1147 data->m = NULL; 1148 } 1149 if (data->map != NULL) 1150 bus_dmamap_destroy(ring->data_dmat, data->map); 1151 } 1152 if (ring->data_dmat != NULL) { 1153 bus_dma_tag_destroy(ring->data_dmat); 1154 ring->data_dmat = NULL; 1155 } 1156 } 1157 1158 static int 1159 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1160 { 1161 bus_addr_t paddr; 1162 bus_size_t size; 1163 int i, error; 1164 1165 ring->qid = qid; 1166 ring->queued = 0; 1167 ring->cur = 0; 1168 ring->update = 0; 
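	/* Software queue of frames waiting for room in this TX ring. */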
1169 mbufq_init(&ring->snd, ifqmaxlen); 1170 1171 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1172 1173 /* Allocate TX descriptors (16KB aligned.) */ 1174 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1175 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1176 size, WPI_RING_DMA_ALIGN); 1177 if (error != 0) { 1178 device_printf(sc->sc_dev, 1179 "%s: could not allocate TX ring DMA memory, error %d\n", 1180 __func__, error); 1181 goto fail; 1182 } 1183 1184 /* Update shared area with ring physical address. */ 1185 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1186 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1187 BUS_DMASYNC_PREWRITE); 1188 1189 /* 1190 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1191 * to allocate commands space for other rings. 1192 * XXX Do we really need to allocate descriptors for other rings? 1193 */ 1194 if (qid > WPI_CMD_QUEUE_NUM) { 1195 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1196 return 0; 1197 } 1198 1199 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1200 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1201 size, 4); 1202 if (error != 0) { 1203 device_printf(sc->sc_dev, 1204 "%s: could not allocate TX cmd DMA memory, error %d\n", 1205 __func__, error); 1206 goto fail; 1207 } 1208 1209 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1210 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1211 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1212 &ring->data_dmat); 1213 if (error != 0) { 1214 device_printf(sc->sc_dev, 1215 "%s: could not create TX buf DMA tag, error %d\n", 1216 __func__, error); 1217 goto fail; 1218 } 1219 1220 paddr = ring->cmd_dma.paddr; 1221 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1222 struct wpi_tx_data *data = &ring->data[i]; 1223 1224 data->cmd_paddr = paddr; 1225 paddr += sizeof (struct wpi_tx_cmd); 1226 1227 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1228 if (error != 0) { 1229 device_printf(sc->sc_dev, 1230 "%s: could not create TX buf DMA map, error %d\n", 1231 __func__, error); 1232 goto fail; 1233 } 1234 } 1235 1236 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1237 1238 return 0; 1239 1240 fail: wpi_free_tx_ring(sc, ring); 1241 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1242 return error; 1243 } 1244 1245 static void 1246 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1247 { 1248 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1249 } 1250 1251 static void 1252 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1253 { 1254 1255 if (ring->update != 0) { 1256 /* Wait for INT_WAKEUP event. 
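		 * An update is already pending; the write pointer will be
		 * pushed out once the wakeup interrupt is serviced.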
*/ 1257 return; 1258 } 1259 1260 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1261 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1262 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1263 __func__, ring->qid); 1264 ring->update = 1; 1265 } else { 1266 wpi_update_tx_ring(sc, ring); 1267 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1268 } 1269 } 1270 1271 static void 1272 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1273 { 1274 int i; 1275 1276 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1277 1278 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1279 struct wpi_tx_data *data = &ring->data[i]; 1280 1281 if (data->m != NULL) { 1282 bus_dmamap_sync(ring->data_dmat, data->map, 1283 BUS_DMASYNC_POSTWRITE); 1284 bus_dmamap_unload(ring->data_dmat, data->map); 1285 m_freem(data->m); 1286 data->m = NULL; 1287 } 1288 if (data->ni != NULL) { 1289 ieee80211_free_node(data->ni); 1290 data->ni = NULL; 1291 } 1292 } 1293 /* Clear TX descriptors. */ 1294 memset(ring->desc, 0, ring->desc_dma.size); 1295 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1296 BUS_DMASYNC_PREWRITE); 1297 mbufq_drain(&ring->snd); 1298 sc->qfullmsk &= ~(1 << ring->qid); 1299 ring->queued = 0; 1300 ring->cur = 0; 1301 ring->update = 0; 1302 } 1303 1304 static void 1305 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1306 { 1307 int i; 1308 1309 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1310 1311 wpi_dma_contig_free(&ring->desc_dma); 1312 wpi_dma_contig_free(&ring->cmd_dma); 1313 1314 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1315 struct wpi_tx_data *data = &ring->data[i]; 1316 1317 if (data->m != NULL) { 1318 bus_dmamap_sync(ring->data_dmat, data->map, 1319 BUS_DMASYNC_POSTWRITE); 1320 bus_dmamap_unload(ring->data_dmat, data->map); 1321 m_freem(data->m); 1322 } 1323 if (data->map != NULL) 1324 bus_dmamap_destroy(ring->data_dmat, data->map); 1325 } 1326 if (ring->data_dmat != NULL) { 1327 bus_dma_tag_destroy(ring->data_dmat); 1328 ring->data_dmat = NULL; 1329 } 1330 } 1331 1332 /* 1333 * Extract various information from EEPROM. 1334 */ 1335 static int 1336 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1337 { 1338 #define WPI_CHK(res) do { \ 1339 if ((error = res) != 0) \ 1340 goto fail; \ 1341 } while (0) 1342 int error, i; 1343 1344 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1345 1346 /* Adapter has to be powered on for EEPROM access to work. */ 1347 if ((error = wpi_apm_init(sc)) != 0) { 1348 device_printf(sc->sc_dev, 1349 "%s: could not power ON adapter, error %d\n", __func__, 1350 error); 1351 return error; 1352 } 1353 1354 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1355 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1356 error = EIO; 1357 goto fail; 1358 } 1359 /* Clear HW ownership of EEPROM. */ 1360 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1361 1362 /* Read the hardware capabilities, revision and SKU type. */ 1363 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1364 sizeof(sc->cap))); 1365 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1366 sizeof(sc->rev))); 1367 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1368 sizeof(sc->type))); 1369 1370 sc->rev = le16toh(sc->rev); 1371 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1372 sc->rev, sc->type); 1373 1374 /* Read the regulatory domain (4 ASCII characters.) 
*/ 1375 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1376 sizeof(sc->domain))); 1377 1378 /* Read MAC address. */ 1379 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1380 IEEE80211_ADDR_LEN)); 1381 1382 /* Read the list of authorized channels. */ 1383 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1384 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1385 1386 /* Read the list of TX power groups. */ 1387 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1388 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1389 1390 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1391 1392 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1393 __func__); 1394 1395 return error; 1396 #undef WPI_CHK 1397 } 1398 1399 /* 1400 * Translate EEPROM flags to net80211. 1401 */ 1402 static uint32_t 1403 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1404 { 1405 uint32_t nflags; 1406 1407 nflags = 0; 1408 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1409 nflags |= IEEE80211_CHAN_PASSIVE; 1410 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1411 nflags |= IEEE80211_CHAN_NOADHOC; 1412 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1413 nflags |= IEEE80211_CHAN_DFS; 1414 /* XXX apparently IBSS may still be marked */ 1415 nflags |= IEEE80211_CHAN_NOADHOC; 1416 } 1417 1418 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1419 if (nflags & IEEE80211_CHAN_NOADHOC) 1420 nflags |= IEEE80211_CHAN_NOHOSTAP; 1421 1422 return nflags; 1423 } 1424 1425 static void 1426 wpi_read_eeprom_band(struct wpi_softc *sc, int n) 1427 { 1428 struct ieee80211com *ic = &sc->sc_ic; 1429 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1430 const struct wpi_chan_band *band = &wpi_bands[n]; 1431 struct ieee80211_channel *c; 1432 uint8_t chan; 1433 int i, nflags; 1434 1435 for (i = 0; i < band->nchan; i++) { 1436 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1437 DPRINTF(sc, WPI_DEBUG_EEPROM, 1438 "Channel Not Valid: %d, band %d\n", 1439 band->chan[i],n); 1440 continue; 1441 } 1442 1443 chan = band->chan[i]; 1444 nflags = wpi_eeprom_channel_flags(&channels[i]); 1445 1446 c = &ic->ic_channels[ic->ic_nchans++]; 1447 c->ic_ieee = chan; 1448 c->ic_maxregpower = channels[i].maxpwr; 1449 c->ic_maxpower = 2*c->ic_maxregpower; 1450 1451 if (n == 0) { /* 2GHz band */ 1452 c->ic_freq = ieee80211_ieee2mhz(chan, 1453 IEEE80211_CHAN_G); 1454 1455 /* G =>'s B is supported */ 1456 c->ic_flags = IEEE80211_CHAN_B | nflags; 1457 c = &ic->ic_channels[ic->ic_nchans++]; 1458 c[0] = c[-1]; 1459 c->ic_flags = IEEE80211_CHAN_G | nflags; 1460 } else { /* 5GHz band */ 1461 c->ic_freq = ieee80211_ieee2mhz(chan, 1462 IEEE80211_CHAN_A); 1463 1464 c->ic_flags = IEEE80211_CHAN_A | nflags; 1465 } 1466 1467 /* Save maximum allowed TX power for this channel. */ 1468 sc->maxpwr[chan] = channels[i].maxpwr; 1469 1470 DPRINTF(sc, WPI_DEBUG_EEPROM, 1471 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1472 " offset %d\n", chan, c->ic_freq, 1473 channels[i].flags, sc->maxpwr[chan], 1474 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1475 } 1476 } 1477 1478 /** 1479 * Read the eeprom to find out what channels are valid for the given 1480 * band and update net80211 with what we find. 
1481 */ 1482 static int 1483 wpi_read_eeprom_channels(struct wpi_softc *sc, int n) 1484 { 1485 struct ieee80211com *ic = &sc->sc_ic; 1486 const struct wpi_chan_band *band = &wpi_bands[n]; 1487 int error; 1488 1489 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1490 1491 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1492 band->nchan * sizeof (struct wpi_eeprom_chan)); 1493 if (error != 0) { 1494 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1495 return error; 1496 } 1497 1498 wpi_read_eeprom_band(sc, n); 1499 1500 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1501 1502 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1503 1504 return 0; 1505 } 1506 1507 static struct wpi_eeprom_chan * 1508 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1509 { 1510 int i, j; 1511 1512 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1513 for (i = 0; i < wpi_bands[j].nchan; i++) 1514 if (wpi_bands[j].chan[i] == c->ic_ieee) 1515 return &sc->eeprom_channels[j][i]; 1516 1517 return NULL; 1518 } 1519 1520 /* 1521 * Enforce flags read from EEPROM. 1522 */ 1523 static int 1524 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1525 int nchan, struct ieee80211_channel chans[]) 1526 { 1527 struct wpi_softc *sc = ic->ic_softc; 1528 int i; 1529 1530 for (i = 0; i < nchan; i++) { 1531 struct ieee80211_channel *c = &chans[i]; 1532 struct wpi_eeprom_chan *channel; 1533 1534 channel = wpi_find_eeprom_channel(sc, c); 1535 if (channel == NULL) { 1536 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1537 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1538 return EINVAL; 1539 } 1540 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1541 } 1542 1543 return 0; 1544 } 1545 1546 static int 1547 wpi_read_eeprom_group(struct wpi_softc *sc, int n) 1548 { 1549 struct wpi_power_group *group = &sc->groups[n]; 1550 struct wpi_eeprom_group rgroup; 1551 int i, error; 1552 1553 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1554 1555 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1556 &rgroup, sizeof rgroup)) != 0) { 1557 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1558 return error; 1559 } 1560 1561 /* Save TX power group information. */ 1562 group->chan = rgroup.chan; 1563 group->maxpwr = rgroup.maxpwr; 1564 /* Retrieve temperature at which the samples were taken. 
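	 * (used later, e.g. by wpi_get_power_index(), to temperature-compensate
	 * the TX power samples)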
*/ 1565 group->temp = (int16_t)le16toh(rgroup.temp); 1566 1567 DPRINTF(sc, WPI_DEBUG_EEPROM, 1568 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1569 group->maxpwr, group->temp); 1570 1571 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1572 group->samples[i].index = rgroup.samples[i].index; 1573 group->samples[i].power = rgroup.samples[i].power; 1574 1575 DPRINTF(sc, WPI_DEBUG_EEPROM, 1576 "\tsample %d: index=%d power=%d\n", i, 1577 group->samples[i].index, group->samples[i].power); 1578 } 1579 1580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1581 1582 return 0; 1583 } 1584 1585 static int 1586 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1587 { 1588 int newid = WPI_ID_IBSS_MIN; 1589 1590 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1591 if ((sc->nodesmsk & (1 << newid)) == 0) { 1592 sc->nodesmsk |= 1 << newid; 1593 return newid; 1594 } 1595 } 1596 1597 return WPI_ID_UNDEFINED; 1598 } 1599 1600 static __inline int 1601 wpi_add_node_entry_sta(struct wpi_softc *sc) 1602 { 1603 sc->nodesmsk |= 1 << WPI_ID_BSS; 1604 1605 return WPI_ID_BSS; 1606 } 1607 1608 static __inline int 1609 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1610 { 1611 if (id == WPI_ID_UNDEFINED) 1612 return 0; 1613 1614 return (sc->nodesmsk >> id) & 1; 1615 } 1616 1617 static __inline void 1618 wpi_clear_node_table(struct wpi_softc *sc) 1619 { 1620 sc->nodesmsk = 0; 1621 } 1622 1623 static __inline void 1624 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1625 { 1626 sc->nodesmsk &= ~(1 << id); 1627 } 1628 1629 static struct ieee80211_node * 1630 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1631 { 1632 struct wpi_node *wn; 1633 1634 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1635 M_NOWAIT | M_ZERO); 1636 1637 if (wn == NULL) 1638 return NULL; 1639 1640 wn->id = WPI_ID_UNDEFINED; 1641 1642 return &wn->ni; 1643 } 1644 1645 static void 1646 wpi_node_free(struct ieee80211_node *ni) 1647 { 1648 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1649 struct wpi_node *wn = WPI_NODE(ni); 1650 1651 if (wn->id != WPI_ID_UNDEFINED) { 1652 WPI_NT_LOCK(sc); 1653 if (wpi_check_node_entry(sc, wn->id)) { 1654 wpi_del_node_entry(sc, wn->id); 1655 wpi_del_node(sc, ni); 1656 } 1657 WPI_NT_UNLOCK(sc); 1658 } 1659 1660 sc->sc_node_free(ni); 1661 } 1662 1663 static __inline int 1664 wpi_check_bss_filter(struct wpi_softc *sc) 1665 { 1666 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1667 } 1668 1669 static void 1670 wpi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1671 const struct ieee80211_rx_stats *rxs, 1672 int rssi, int nf) 1673 { 1674 struct ieee80211vap *vap = ni->ni_vap; 1675 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1676 struct wpi_vap *wvp = WPI_VAP(vap); 1677 uint64_t ni_tstamp, rx_tstamp; 1678 1679 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1680 1681 if (vap->iv_opmode == IEEE80211_M_IBSS && 1682 vap->iv_state == IEEE80211_S_RUN && 1683 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1684 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1685 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1686 rx_tstamp = le64toh(sc->rx_tstamp); 1687 1688 if (ni_tstamp >= rx_tstamp) { 1689 DPRINTF(sc, WPI_DEBUG_STATE, 1690 "ibss merge, tsf %ju tstamp %ju\n", 1691 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1692 (void) ieee80211_ibss_merge(ni); 1693 } 1694 } 1695 } 1696 1697 static void 1698 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1699 { 1700 struct wpi_softc *sc = arg; 1701 struct wpi_node *wn = WPI_NODE(ni); 1702 int error; 1703 1704 
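	/*
	 * Re-register this node with the firmware after its node table
	 * entry has been invalidated (e.g. by the RXON update done on an
	 * IBSS merge).
	 */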
	WPI_NT_LOCK(sc);
	if (wn->id != WPI_ID_UNDEFINED) {
		wn->id = WPI_ID_UNDEFINED;
		if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not add IBSS node, error %d\n",
			    __func__, error);
		}
	}
	WPI_NT_UNLOCK(sc);
}

static void
wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* Set group keys once. */
	WPI_NT_LOCK(sc);
	wvp->wv_gtk = 0;
	WPI_NT_UNLOCK(sc);

	ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc);
	ieee80211_crypto_reload_keys(ic);
}

/**
 * Called by net80211 whenever there is a change to the 802.11 state machine.
 */
static int
wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct wpi_vap *wvp = WPI_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct wpi_softc *sc = ic->ic_softc;
	int error = 0;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	WPI_TXQ_LOCK(sc);
	if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) {
		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
		WPI_TXQ_UNLOCK(sc);

		return ENXIO;
	}
	WPI_TXQ_UNLOCK(sc);

	DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);

	if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) {
		if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set power saving level\n",
			    __func__);
			return error;
		}

		wpi_set_led(sc, WPI_LED_LINK, 1, 0);
	}

	switch (nstate) {
	case IEEE80211_S_SCAN:
		WPI_RXON_LOCK(sc);
		if (wpi_check_bss_filter(sc) != 0) {
			sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
			if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: could not send RXON\n", __func__);
			}
		}
		WPI_RXON_UNLOCK(sc);
		break;

	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		/*
		 * NB: do not optimize the AUTH -> AUTH state transition -
		 * this will break powersave with a non-QoS AP!
		 */

		/*
		 * The node must be registered in the firmware before auth.
		 * Also the associd must be cleared on RUN -> ASSOC
		 * transitions.
		 */
		if ((error = wpi_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to AUTH state, error %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition:
		 * STA mode: Just restart the timers.
		 * IBSS mode: Process IBSS merge.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			if (vap->iv_opmode != IEEE80211_M_IBSS) {
				WPI_RXON_LOCK(sc);
				wpi_calib_timeout(sc);
				WPI_RXON_UNLOCK(sc);
				break;
			} else {
				/*
				 * Drop the BSS_FILTER bit
				 * (there is no other way to change the bssid).
				 */
				WPI_RXON_LOCK(sc);
				sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
				if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
					device_printf(sc->sc_dev,
					    "%s: could not send RXON\n",
					    __func__);
				}
				WPI_RXON_UNLOCK(sc);

				/* Restore everything that was lost. */
				wpi_restore_node_table(sc, wvp);

				/* XXX set conditionally?
*/ 1832 wpi_updateedca(ic); 1833 } 1834 } 1835 1836 /* 1837 * !RUN -> RUN requires setting the association id 1838 * which is done with a firmware cmd. We also defer 1839 * starting the timers until that work is done. 1840 */ 1841 if ((error = wpi_run(sc, vap)) != 0) { 1842 device_printf(sc->sc_dev, 1843 "%s: could not move to RUN state\n", __func__); 1844 } 1845 break; 1846 1847 default: 1848 break; 1849 } 1850 if (error != 0) { 1851 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1852 return error; 1853 } 1854 1855 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1856 1857 return wvp->wv_newstate(vap, nstate, arg); 1858 } 1859 1860 static void 1861 wpi_calib_timeout(void *arg) 1862 { 1863 struct wpi_softc *sc = arg; 1864 1865 if (wpi_check_bss_filter(sc) == 0) 1866 return; 1867 1868 wpi_power_calibration(sc); 1869 1870 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1871 } 1872 1873 static __inline uint8_t 1874 rate2plcp(const uint8_t rate) 1875 { 1876 switch (rate) { 1877 case 12: return 0xd; 1878 case 18: return 0xf; 1879 case 24: return 0x5; 1880 case 36: return 0x7; 1881 case 48: return 0x9; 1882 case 72: return 0xb; 1883 case 96: return 0x1; 1884 case 108: return 0x3; 1885 case 2: return 10; 1886 case 4: return 20; 1887 case 11: return 55; 1888 case 22: return 110; 1889 default: return 0; 1890 } 1891 } 1892 1893 static __inline uint8_t 1894 plcp2rate(const uint8_t plcp) 1895 { 1896 switch (plcp) { 1897 case 0xd: return 12; 1898 case 0xf: return 18; 1899 case 0x5: return 24; 1900 case 0x7: return 36; 1901 case 0x9: return 48; 1902 case 0xb: return 72; 1903 case 0x1: return 96; 1904 case 0x3: return 108; 1905 case 10: return 2; 1906 case 20: return 4; 1907 case 55: return 11; 1908 case 110: return 22; 1909 default: return 0; 1910 } 1911 } 1912 1913 /* Quickly determine if a given rate is CCK or OFDM. */ 1914 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1915 1916 static void 1917 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1918 struct wpi_rx_data *data) 1919 { 1920 struct ieee80211com *ic = &sc->sc_ic; 1921 struct wpi_rx_ring *ring = &sc->rxq; 1922 struct wpi_rx_stat *stat; 1923 struct wpi_rx_head *head; 1924 struct wpi_rx_tail *tail; 1925 struct ieee80211_frame *wh; 1926 struct ieee80211_node *ni; 1927 struct mbuf *m, *m1; 1928 bus_addr_t paddr; 1929 uint32_t flags; 1930 uint16_t len; 1931 int error; 1932 1933 stat = (struct wpi_rx_stat *)(desc + 1); 1934 1935 if (stat->len > WPI_STAT_MAXLEN) { 1936 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1937 goto fail1; 1938 } 1939 1940 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1941 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1942 len = le16toh(head->len); 1943 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1944 flags = le32toh(tail->flags); 1945 1946 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1947 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1948 le32toh(desc->len), len, (int8_t)stat->rssi, 1949 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1950 1951 /* Discard frames with a bad FCS early. */ 1952 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1953 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1954 __func__, flags); 1955 goto fail1; 1956 } 1957 /* Discard frames that are too short. 
*/ 1958 if (len < sizeof (struct ieee80211_frame_ack)) { 1959 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1960 __func__, len); 1961 goto fail1; 1962 } 1963 1964 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1965 if (m1 == NULL) { 1966 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1967 __func__); 1968 goto fail1; 1969 } 1970 bus_dmamap_unload(ring->data_dmat, data->map); 1971 1972 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1973 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1974 if (error != 0 && error != EFBIG) { 1975 device_printf(sc->sc_dev, 1976 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1977 m_freem(m1); 1978 1979 /* Try to reload the old mbuf. */ 1980 error = bus_dmamap_load(ring->data_dmat, data->map, 1981 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1982 &paddr, BUS_DMA_NOWAIT); 1983 if (error != 0 && error != EFBIG) { 1984 panic("%s: could not load old RX mbuf", __func__); 1985 } 1986 /* Physical address may have changed. */ 1987 ring->desc[ring->cur] = htole32(paddr); 1988 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1989 BUS_DMASYNC_PREWRITE); 1990 goto fail1; 1991 } 1992 1993 m = data->m; 1994 data->m = m1; 1995 /* Update RX descriptor. */ 1996 ring->desc[ring->cur] = htole32(paddr); 1997 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1998 BUS_DMASYNC_PREWRITE); 1999 2000 /* Finalize mbuf. */ 2001 m->m_data = (caddr_t)(head + 1); 2002 m->m_pkthdr.len = m->m_len = len; 2003 2004 /* Grab a reference to the source node. */ 2005 wh = mtod(m, struct ieee80211_frame *); 2006 2007 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2008 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2009 /* Check whether decryption was successful or not. */ 2010 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2011 DPRINTF(sc, WPI_DEBUG_RECV, 2012 "CCMP decryption failed 0x%x\n", flags); 2013 goto fail2; 2014 } 2015 m->m_flags |= M_WEP; 2016 } 2017 2018 if (len >= sizeof(struct ieee80211_frame_min)) 2019 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2020 else 2021 ni = NULL; 2022 2023 sc->rx_tstamp = tail->tstamp; 2024 2025 if (ieee80211_radiotap_active(ic)) { 2026 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2027 2028 tap->wr_flags = 0; 2029 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2030 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2031 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2032 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2033 tap->wr_tsft = tail->tstamp; 2034 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2035 tap->wr_rate = plcp2rate(head->plcp); 2036 } 2037 2038 WPI_UNLOCK(sc); 2039 2040 /* Send the frame to the 802.11 layer. */ 2041 if (ni != NULL) { 2042 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2043 /* Node is no longer needed. 
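 * This drops the reference taken by ieee80211_find_rxnode() above,
 * now that ieee80211_input() has consumed the frame.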
*/ 2044 ieee80211_free_node(ni); 2045 } else 2046 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2047 2048 WPI_LOCK(sc); 2049 2050 return; 2051 2052 fail2: m_freem(m); 2053 2054 fail1: counter_u64_add(ic->ic_ierrors, 1); 2055 } 2056 2057 static void 2058 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2059 struct wpi_rx_data *data) 2060 { 2061 /* Ignore */ 2062 } 2063 2064 static void 2065 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2066 { 2067 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2068 struct wpi_tx_data *data = &ring->data[desc->idx]; 2069 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2070 struct mbuf *m; 2071 struct ieee80211_node *ni; 2072 struct ieee80211vap *vap; 2073 struct ieee80211com *ic; 2074 uint32_t status = le32toh(stat->status); 2075 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2076 2077 KASSERT(data->ni != NULL, ("no node")); 2078 KASSERT(data->m != NULL, ("no mbuf")); 2079 2080 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2081 2082 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2083 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2084 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2085 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2086 2087 /* Unmap and free mbuf. */ 2088 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2089 bus_dmamap_unload(ring->data_dmat, data->map); 2090 m = data->m, data->m = NULL; 2091 ni = data->ni, data->ni = NULL; 2092 vap = ni->ni_vap; 2093 ic = vap->iv_ic; 2094 2095 /* 2096 * Update rate control statistics for the node. 2097 */ 2098 if (status & WPI_TX_STATUS_FAIL) { 2099 ieee80211_ratectl_tx_complete(vap, ni, 2100 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2101 } else 2102 ieee80211_ratectl_tx_complete(vap, ni, 2103 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2104 2105 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2106 2107 WPI_TXQ_STATE_LOCK(sc); 2108 ring->queued -= 1; 2109 if (ring->queued > 0) { 2110 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2111 2112 if ((sc->qfullmsk & (1 << ring->qid)) != 0 && 2113 ring->queued < WPI_TX_RING_LOMARK) { 2114 sc->qfullmsk &= ~(1 << ring->qid); 2115 ieee80211_runtask(ic, &sc->sc_start_task); 2116 } 2117 } else 2118 callout_stop(&sc->tx_timeout); 2119 WPI_TXQ_STATE_UNLOCK(sc); 2120 2121 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2122 } 2123 2124 /* 2125 * Process a "command done" firmware notification. This is where we wakeup 2126 * processes waiting for a synchronous command completion. 2127 */ 2128 static void 2129 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2130 { 2131 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2132 struct wpi_tx_data *data; 2133 2134 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2135 "type %s len %d\n", desc->qid, desc->idx, 2136 desc->flags, wpi_cmd_str(desc->type), 2137 le32toh(desc->len)); 2138 2139 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2140 return; /* Not a command ack. */ 2141 2142 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2143 2144 data = &ring->data[desc->idx]; 2145 2146 /* If the command was mapped in an mbuf, free it. 
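 * Commands larger than the pre-mapped ring slot are sent from an mbuf
 * allocated in wpi_cmd(); small commands live in ring->cmd[] and need
 * no per-command cleanup here.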
*/ 2147 if (data->m != NULL) { 2148 bus_dmamap_sync(ring->data_dmat, data->map, 2149 BUS_DMASYNC_POSTWRITE); 2150 bus_dmamap_unload(ring->data_dmat, data->map); 2151 m_freem(data->m); 2152 data->m = NULL; 2153 } 2154 2155 wakeup(&ring->cmd[desc->idx]); 2156 2157 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2158 WPI_TXQ_LOCK(sc); 2159 if (sc->sc_flags & WPI_PS_PATH) { 2160 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2161 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2162 } else { 2163 sc->sc_update_rx_ring = wpi_update_rx_ring; 2164 sc->sc_update_tx_ring = wpi_update_tx_ring; 2165 } 2166 WPI_TXQ_UNLOCK(sc); 2167 } 2168 } 2169 2170 static void 2171 wpi_notif_intr(struct wpi_softc *sc) 2172 { 2173 struct ieee80211com *ic = &sc->sc_ic; 2174 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2175 uint32_t hw; 2176 2177 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2178 BUS_DMASYNC_POSTREAD); 2179 2180 hw = le32toh(sc->shared->next) & 0xfff; 2181 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2182 2183 while (sc->rxq.cur != hw) { 2184 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2185 2186 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2187 struct wpi_rx_desc *desc; 2188 2189 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2190 BUS_DMASYNC_POSTREAD); 2191 desc = mtod(data->m, struct wpi_rx_desc *); 2192 2193 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2194 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2195 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2196 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2197 2198 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2199 /* Reply to a command. */ 2200 wpi_cmd_done(sc, desc); 2201 } 2202 2203 switch (desc->type) { 2204 case WPI_RX_DONE: 2205 /* An 802.11 frame has been received. */ 2206 wpi_rx_done(sc, desc, data); 2207 2208 if (sc->sc_running == 0) { 2209 /* wpi_stop() was called. */ 2210 return; 2211 } 2212 2213 break; 2214 2215 case WPI_TX_DONE: 2216 /* An 802.11 frame has been transmitted. 
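 * wpi_tx_done() reports the result to net80211 rate control, completes
 * the mbuf and, if the ring had been marked full, restarts the send
 * task once enough space is available again.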
*/ 2217 wpi_tx_done(sc, desc); 2218 break; 2219 2220 case WPI_RX_STATISTICS: 2221 case WPI_BEACON_STATISTICS: 2222 wpi_rx_statistics(sc, desc, data); 2223 break; 2224 2225 case WPI_BEACON_MISSED: 2226 { 2227 struct wpi_beacon_missed *miss = 2228 (struct wpi_beacon_missed *)(desc + 1); 2229 uint32_t expected, misses, received, threshold; 2230 2231 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2232 BUS_DMASYNC_POSTREAD); 2233 2234 misses = le32toh(miss->consecutive); 2235 expected = le32toh(miss->expected); 2236 received = le32toh(miss->received); 2237 threshold = MAX(2, vap->iv_bmissthreshold); 2238 2239 DPRINTF(sc, WPI_DEBUG_BMISS, 2240 "%s: beacons missed %u(%u) (received %u/%u)\n", 2241 __func__, misses, le32toh(miss->total), received, 2242 expected); 2243 2244 if (misses >= threshold || 2245 (received == 0 && expected >= threshold)) { 2246 WPI_RXON_LOCK(sc); 2247 if (callout_pending(&sc->scan_timeout)) { 2248 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2249 0, 1); 2250 } 2251 WPI_RXON_UNLOCK(sc); 2252 if (vap->iv_state == IEEE80211_S_RUN && 2253 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2254 ieee80211_beacon_miss(ic); 2255 } 2256 2257 break; 2258 } 2259 #ifdef WPI_DEBUG 2260 case WPI_BEACON_SENT: 2261 { 2262 struct wpi_tx_stat *stat = 2263 (struct wpi_tx_stat *)(desc + 1); 2264 uint64_t *tsf = (uint64_t *)(stat + 1); 2265 uint32_t *mode = (uint32_t *)(tsf + 1); 2266 2267 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2268 BUS_DMASYNC_POSTREAD); 2269 2270 DPRINTF(sc, WPI_DEBUG_BEACON, 2271 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2272 "duration %u, status %x, tsf %ju, mode %x\n", 2273 stat->rtsfailcnt, stat->ackfailcnt, 2274 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2275 le32toh(stat->status), *tsf, *mode); 2276 2277 break; 2278 } 2279 #endif 2280 case WPI_UC_READY: 2281 { 2282 struct wpi_ucode_info *uc = 2283 (struct wpi_ucode_info *)(desc + 1); 2284 2285 /* The microcontroller is ready. */ 2286 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2287 BUS_DMASYNC_POSTREAD); 2288 DPRINTF(sc, WPI_DEBUG_RESET, 2289 "microcode alive notification version=%d.%d " 2290 "subtype=%x alive=%x\n", uc->major, uc->minor, 2291 uc->subtype, le32toh(uc->valid)); 2292 2293 if (le32toh(uc->valid) != 1) { 2294 device_printf(sc->sc_dev, 2295 "microcontroller initialization failed\n"); 2296 wpi_stop_locked(sc); 2297 return; 2298 } 2299 /* Save the address of the error log in SRAM. 
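 * wpi_fatal_intr() reads the log at this address if the firmware later
 * reports a software or hardware error.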
*/ 2300 sc->errptr = le32toh(uc->errptr); 2301 break; 2302 } 2303 case WPI_STATE_CHANGED: 2304 { 2305 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2306 BUS_DMASYNC_POSTREAD); 2307 2308 uint32_t *status = (uint32_t *)(desc + 1); 2309 2310 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2311 le32toh(*status)); 2312 2313 if (le32toh(*status) & 1) { 2314 WPI_NT_LOCK(sc); 2315 wpi_clear_node_table(sc); 2316 WPI_NT_UNLOCK(sc); 2317 taskqueue_enqueue(sc->sc_tq, 2318 &sc->sc_radiooff_task); 2319 return; 2320 } 2321 break; 2322 } 2323 #ifdef WPI_DEBUG 2324 case WPI_START_SCAN: 2325 { 2326 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2327 BUS_DMASYNC_POSTREAD); 2328 2329 struct wpi_start_scan *scan = 2330 (struct wpi_start_scan *)(desc + 1); 2331 DPRINTF(sc, WPI_DEBUG_SCAN, 2332 "%s: scanning channel %d status %x\n", 2333 __func__, scan->chan, le32toh(scan->status)); 2334 2335 break; 2336 } 2337 #endif 2338 case WPI_STOP_SCAN: 2339 { 2340 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2341 BUS_DMASYNC_POSTREAD); 2342 2343 struct wpi_stop_scan *scan = 2344 (struct wpi_stop_scan *)(desc + 1); 2345 2346 DPRINTF(sc, WPI_DEBUG_SCAN, 2347 "scan finished nchan=%d status=%d chan=%d\n", 2348 scan->nchan, scan->status, scan->chan); 2349 2350 WPI_RXON_LOCK(sc); 2351 callout_stop(&sc->scan_timeout); 2352 WPI_RXON_UNLOCK(sc); 2353 if (scan->status == WPI_SCAN_ABORTED) 2354 ieee80211_cancel_scan(vap); 2355 else 2356 ieee80211_scan_next(vap); 2357 break; 2358 } 2359 } 2360 2361 if (sc->rxq.cur % 8 == 0) { 2362 /* Tell the firmware what we have processed. */ 2363 sc->sc_update_rx_ring(sc); 2364 } 2365 } 2366 } 2367 2368 /* 2369 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2370 * from power-down sleep mode. 2371 */ 2372 static void 2373 wpi_wakeup_intr(struct wpi_softc *sc) 2374 { 2375 int qid; 2376 2377 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2378 "%s: ucode wakeup from power-down sleep\n", __func__); 2379 2380 /* Wakeup RX and TX rings. 
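 * Ring index updates that were deferred while the microcontroller was
 * asleep (the ->update flags) are written out now, after which the MAC
 * access request bit can be released.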
*/ 2381 if (sc->rxq.update) { 2382 sc->rxq.update = 0; 2383 wpi_update_rx_ring(sc); 2384 } 2385 WPI_TXQ_LOCK(sc); 2386 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2387 struct wpi_tx_ring *ring = &sc->txq[qid]; 2388 2389 if (ring->update) { 2390 ring->update = 0; 2391 wpi_update_tx_ring(sc, ring); 2392 } 2393 } 2394 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2395 WPI_TXQ_UNLOCK(sc); 2396 } 2397 2398 /* 2399 * This function prints firmware registers 2400 */ 2401 #ifdef WPI_DEBUG 2402 static void 2403 wpi_debug_registers(struct wpi_softc *sc) 2404 { 2405 size_t i; 2406 static const uint32_t csr_tbl[] = { 2407 WPI_HW_IF_CONFIG, 2408 WPI_INT, 2409 WPI_INT_MASK, 2410 WPI_FH_INT, 2411 WPI_GPIO_IN, 2412 WPI_RESET, 2413 WPI_GP_CNTRL, 2414 WPI_EEPROM, 2415 WPI_EEPROM_GP, 2416 WPI_GIO, 2417 WPI_UCODE_GP1, 2418 WPI_UCODE_GP2, 2419 WPI_GIO_CHICKEN, 2420 WPI_ANA_PLL, 2421 WPI_DBG_HPET_MEM, 2422 }; 2423 static const uint32_t prph_tbl[] = { 2424 WPI_APMG_CLK_CTRL, 2425 WPI_APMG_PS, 2426 WPI_APMG_PCI_STT, 2427 WPI_APMG_RFKILL, 2428 }; 2429 2430 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2431 2432 for (i = 0; i < nitems(csr_tbl); i++) { 2433 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2434 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2435 2436 if ((i + 1) % 2 == 0) 2437 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2438 } 2439 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2440 2441 if (wpi_nic_lock(sc) == 0) { 2442 for (i = 0; i < nitems(prph_tbl); i++) { 2443 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2444 wpi_get_prph_string(prph_tbl[i]), 2445 wpi_prph_read(sc, prph_tbl[i])); 2446 2447 if ((i + 1) % 2 == 0) 2448 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2449 } 2450 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2451 wpi_nic_unlock(sc); 2452 } else { 2453 DPRINTF(sc, WPI_DEBUG_REGISTER, 2454 "Cannot access internal registers.\n"); 2455 } 2456 } 2457 #endif 2458 2459 /* 2460 * Dump the error log of the firmware when a firmware panic occurs. Although 2461 * we can't debug the firmware because it is neither open source nor free, it 2462 * can help us to identify certain classes of problems. 2463 */ 2464 static void 2465 wpi_fatal_intr(struct wpi_softc *sc) 2466 { 2467 struct wpi_fw_dump dump; 2468 uint32_t i, offset, count; 2469 2470 /* Check that the error log address is valid. */ 2471 if (sc->errptr < WPI_FW_DATA_BASE || 2472 sc->errptr + sizeof (dump) > 2473 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2474 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2475 sc->errptr); 2476 return; 2477 } 2478 if (wpi_nic_lock(sc) != 0) { 2479 printf("%s: could not read firmware error log\n", __func__); 2480 return; 2481 } 2482 /* Read number of entries in the log. */ 2483 count = wpi_mem_read(sc, sc->errptr); 2484 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2485 printf("%s: invalid count field (count = %u)\n", __func__, 2486 count); 2487 wpi_nic_unlock(sc); 2488 return; 2489 } 2490 /* Skip "count" field. */ 2491 offset = sc->errptr + sizeof (uint32_t); 2492 printf("firmware error log (count = %u):\n", count); 2493 for (i = 0; i < count; i++) { 2494 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2495 sizeof (dump) / sizeof (uint32_t)); 2496 2497 printf(" error type = \"%s\" (0x%08X)\n", 2498 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2499 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2500 dump.desc); 2501 printf(" error data = 0x%08X\n", 2502 dump.data); 2503 printf(" branch link = 0x%08X%08X\n", 2504 dump.blink[0], dump.blink[1]); 2505 printf(" interrupt link = 0x%08X%08X\n", 2506 dump.ilink[0], dump.ilink[1]); 2507 printf(" time = %u\n", dump.time); 2508 2509 offset += sizeof (dump); 2510 } 2511 wpi_nic_unlock(sc); 2512 /* Dump driver status (TX and RX rings) while we're here. */ 2513 printf("driver status:\n"); 2514 WPI_TXQ_LOCK(sc); 2515 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2516 struct wpi_tx_ring *ring = &sc->txq[i]; 2517 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2518 i, ring->qid, ring->cur, ring->queued); 2519 } 2520 WPI_TXQ_UNLOCK(sc); 2521 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2522 } 2523 2524 static void 2525 wpi_intr(void *arg) 2526 { 2527 struct wpi_softc *sc = arg; 2528 uint32_t r1, r2; 2529 2530 WPI_LOCK(sc); 2531 2532 /* Disable interrupts. */ 2533 WPI_WRITE(sc, WPI_INT_MASK, 0); 2534 2535 r1 = WPI_READ(sc, WPI_INT); 2536 2537 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2538 goto end; /* Hardware gone! */ 2539 2540 r2 = WPI_READ(sc, WPI_FH_INT); 2541 2542 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2543 r1, r2); 2544 2545 if (r1 == 0 && r2 == 0) 2546 goto done; /* Interrupt not for us. */ 2547 2548 /* Acknowledge interrupts. */ 2549 WPI_WRITE(sc, WPI_INT, r1); 2550 WPI_WRITE(sc, WPI_FH_INT, r2); 2551 2552 if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) { 2553 device_printf(sc->sc_dev, "fatal firmware error\n"); 2554 #ifdef WPI_DEBUG 2555 wpi_debug_registers(sc); 2556 #endif 2557 wpi_fatal_intr(sc); 2558 DPRINTF(sc, WPI_DEBUG_HW, 2559 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2560 "(Hardware Error)"); 2561 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2562 goto end; 2563 } 2564 2565 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2566 (r2 & WPI_FH_INT_RX)) 2567 wpi_notif_intr(sc); 2568 2569 if (r1 & WPI_INT_ALIVE) 2570 wakeup(sc); /* Firmware is alive. */ 2571 2572 if (r1 & WPI_INT_WAKEUP) 2573 wpi_wakeup_intr(sc); 2574 2575 done: 2576 /* Re-enable interrupts. */ 2577 if (sc->sc_running) 2578 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2579 2580 end: WPI_UNLOCK(sc); 2581 } 2582 2583 static int 2584 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2585 { 2586 struct ieee80211_frame *wh; 2587 struct wpi_tx_cmd *cmd; 2588 struct wpi_tx_data *data; 2589 struct wpi_tx_desc *desc; 2590 struct wpi_tx_ring *ring; 2591 struct mbuf *m1; 2592 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2593 int error, i, hdrlen, nsegs, totlen, pad; 2594 2595 WPI_TXQ_LOCK(sc); 2596 2597 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2598 2599 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2600 2601 if (sc->sc_running == 0) { 2602 /* wpi_stop() was called */ 2603 error = ENETDOWN; 2604 goto fail; 2605 } 2606 2607 wh = mtod(buf->m, struct ieee80211_frame *); 2608 hdrlen = ieee80211_anyhdrsize(wh); 2609 totlen = buf->m->m_pkthdr.len; 2610 2611 if (hdrlen & 3) { 2612 /* First segment length must be a multiple of 4. */ 2613 pad = 4 - (hdrlen & 3); 2614 } else 2615 pad = 0; 2616 2617 ring = &sc->txq[buf->ac]; 2618 desc = &ring->desc[ring->cur]; 2619 data = &ring->data[ring->cur]; 2620 2621 /* Prepare TX firmware command. 
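 * The command, the saved 802.11 header and the padding computed above
 * make up the first DMA segment; e.g. a 26-byte QoS header yields
 * pad = 4 - (26 & 3) = 2, keeping the first segment length a multiple
 * of 4.  The mbuf payload is carried in the remaining scatter segments.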
*/ 2622 cmd = &ring->cmd[ring->cur]; 2623 cmd->code = buf->code; 2624 cmd->flags = 0; 2625 cmd->qid = ring->qid; 2626 cmd->idx = ring->cur; 2627 2628 memcpy(cmd->data, buf->data, buf->size); 2629 2630 /* Save and trim IEEE802.11 header. */ 2631 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2632 m_adj(buf->m, hdrlen); 2633 2634 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2635 segs, &nsegs, BUS_DMA_NOWAIT); 2636 if (error != 0 && error != EFBIG) { 2637 device_printf(sc->sc_dev, 2638 "%s: can't map mbuf (error %d)\n", __func__, error); 2639 goto fail; 2640 } 2641 if (error != 0) { 2642 /* Too many DMA segments, linearize mbuf. */ 2643 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2644 if (m1 == NULL) { 2645 device_printf(sc->sc_dev, 2646 "%s: could not defrag mbuf\n", __func__); 2647 error = ENOBUFS; 2648 goto fail; 2649 } 2650 buf->m = m1; 2651 2652 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2653 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2654 if (error != 0) { 2655 device_printf(sc->sc_dev, 2656 "%s: can't map mbuf (error %d)\n", __func__, 2657 error); 2658 goto fail; 2659 } 2660 } 2661 2662 KASSERT(nsegs < WPI_MAX_SCATTER, 2663 ("too many DMA segments, nsegs (%d) should be less than %d", 2664 nsegs, WPI_MAX_SCATTER)); 2665 2666 data->m = buf->m; 2667 data->ni = buf->ni; 2668 2669 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2670 __func__, ring->qid, ring->cur, totlen, nsegs); 2671 2672 /* Fill TX descriptor. */ 2673 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2674 /* First DMA segment is used by the TX command. */ 2675 desc->segs[0].addr = htole32(data->cmd_paddr); 2676 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2677 /* Other DMA segments are for data payload. */ 2678 seg = &segs[0]; 2679 for (i = 1; i <= nsegs; i++) { 2680 desc->segs[i].addr = htole32(seg->ds_addr); 2681 desc->segs[i].len = htole32(seg->ds_len); 2682 seg++; 2683 } 2684 2685 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2686 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2687 BUS_DMASYNC_PREWRITE); 2688 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2689 BUS_DMASYNC_PREWRITE); 2690 2691 /* Kick TX ring. */ 2692 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2693 sc->sc_update_tx_ring(sc, ring); 2694 2695 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2696 /* Mark TX ring as full if we reach a certain threshold. */ 2697 WPI_TXQ_STATE_LOCK(sc); 2698 if (++ring->queued > WPI_TX_RING_HIMARK) 2699 sc->qfullmsk |= 1 << ring->qid; 2700 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2701 WPI_TXQ_STATE_UNLOCK(sc); 2702 } 2703 2704 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2705 2706 WPI_TXQ_UNLOCK(sc); 2707 2708 return 0; 2709 2710 fail: m_freem(buf->m); 2711 2712 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2713 2714 WPI_TXQ_UNLOCK(sc); 2715 2716 return error; 2717 } 2718 2719 /* 2720 * Construct the data packet for a transmit buffer. 
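 * The rate comes from the vap tx parameters for management, multicast,
 * fixed-rate and EAPOL frames, and from net80211 rate control for other
 * unicast data; RTS/CTS protection is then chosen and the frame is
 * handed to wpi_cmd2() for DMA mapping and ring submission.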
2721 */ 2722 static int 2723 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2724 { 2725 const struct ieee80211_txparam *tp; 2726 struct ieee80211vap *vap = ni->ni_vap; 2727 struct ieee80211com *ic = ni->ni_ic; 2728 struct wpi_node *wn = WPI_NODE(ni); 2729 struct ieee80211_channel *chan; 2730 struct ieee80211_frame *wh; 2731 struct ieee80211_key *k = NULL; 2732 struct wpi_buf tx_data; 2733 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2734 uint32_t flags; 2735 uint16_t qos; 2736 uint8_t tid, type; 2737 int ac, error, swcrypt, rate, ismcast, totlen; 2738 2739 wh = mtod(m, struct ieee80211_frame *); 2740 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2741 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2742 2743 /* Select EDCA Access Category and TX ring for this frame. */ 2744 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2745 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2746 tid = qos & IEEE80211_QOS_TID; 2747 } else { 2748 qos = 0; 2749 tid = 0; 2750 } 2751 ac = M_WME_GETAC(m); 2752 2753 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2754 ni->ni_chan : ic->ic_curchan; 2755 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2756 2757 /* Choose a TX rate index. */ 2758 if (type == IEEE80211_FC0_TYPE_MGT) 2759 rate = tp->mgmtrate; 2760 else if (ismcast) 2761 rate = tp->mcastrate; 2762 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2763 rate = tp->ucastrate; 2764 else if (m->m_flags & M_EAPOL) 2765 rate = tp->mgmtrate; 2766 else { 2767 /* XXX pass pktlen */ 2768 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2769 rate = ni->ni_txrate; 2770 } 2771 2772 /* Encrypt the frame if need be. */ 2773 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2774 /* Retrieve key for TX. */ 2775 k = ieee80211_crypto_encap(ni, m); 2776 if (k == NULL) { 2777 error = ENOBUFS; 2778 goto fail; 2779 } 2780 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2781 2782 /* 802.11 header may have moved. */ 2783 wh = mtod(m, struct ieee80211_frame *); 2784 } 2785 totlen = m->m_pkthdr.len; 2786 2787 if (ieee80211_radiotap_active_vap(vap)) { 2788 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2789 2790 tap->wt_flags = 0; 2791 tap->wt_rate = rate; 2792 if (k != NULL) 2793 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2794 2795 ieee80211_radiotap_tx(vap, m); 2796 } 2797 2798 flags = 0; 2799 if (!ismcast) { 2800 /* Unicast frame, check if an ACK is expected. */ 2801 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2802 IEEE80211_QOS_ACKPOLICY_NOACK) 2803 flags |= WPI_TX_NEED_ACK; 2804 } 2805 2806 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2807 flags |= WPI_TX_AUTO_SEQ; 2808 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2809 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2810 2811 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2812 if (!ismcast) { 2813 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2814 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2815 flags |= WPI_TX_NEED_RTS; 2816 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2817 WPI_RATE_IS_OFDM(rate)) { 2818 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2819 flags |= WPI_TX_NEED_CTS; 2820 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2821 flags |= WPI_TX_NEED_RTS; 2822 } 2823 2824 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2825 flags |= WPI_TX_FULL_TXOP; 2826 } 2827 2828 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2829 if (type == IEEE80211_FC0_TYPE_MGT) { 2830 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2831 2832 /* Tell HW to set timestamp in probe responses. 
*/ 2833 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2834 flags |= WPI_TX_INSERT_TSTAMP; 2835 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2836 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2837 tx->timeout = htole16(3); 2838 else 2839 tx->timeout = htole16(2); 2840 } 2841 2842 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2843 tx->id = WPI_ID_BROADCAST; 2844 else { 2845 if (wn->id == WPI_ID_UNDEFINED) { 2846 device_printf(sc->sc_dev, 2847 "%s: undefined node id\n", __func__); 2848 error = EINVAL; 2849 goto fail; 2850 } 2851 2852 tx->id = wn->id; 2853 } 2854 2855 if (k != NULL && !swcrypt) { 2856 switch (k->wk_cipher->ic_cipher) { 2857 case IEEE80211_CIPHER_AES_CCM: 2858 tx->security = WPI_CIPHER_CCMP; 2859 break; 2860 2861 default: 2862 break; 2863 } 2864 2865 memcpy(tx->key, k->wk_key, k->wk_keylen); 2866 } 2867 2868 tx->len = htole16(totlen); 2869 tx->flags = htole32(flags); 2870 tx->plcp = rate2plcp(rate); 2871 tx->tid = tid; 2872 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2873 tx->ofdm_mask = 0xff; 2874 tx->cck_mask = 0x0f; 2875 tx->rts_ntries = 7; 2876 tx->data_ntries = tp->maxretry; 2877 2878 tx_data.ni = ni; 2879 tx_data.m = m; 2880 tx_data.size = sizeof(struct wpi_cmd_data); 2881 tx_data.code = WPI_CMD_TX_DATA; 2882 tx_data.ac = ac; 2883 2884 return wpi_cmd2(sc, &tx_data); 2885 2886 fail: m_freem(m); 2887 return error; 2888 } 2889 2890 static int 2891 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2892 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2893 { 2894 struct ieee80211vap *vap = ni->ni_vap; 2895 struct ieee80211_key *k = NULL; 2896 struct ieee80211_frame *wh; 2897 struct wpi_buf tx_data; 2898 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2899 uint32_t flags; 2900 uint8_t type; 2901 int ac, rate, swcrypt, totlen; 2902 2903 wh = mtod(m, struct ieee80211_frame *); 2904 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2905 2906 ac = params->ibp_pri & 3; 2907 2908 /* Choose a TX rate index. */ 2909 rate = params->ibp_rate0; 2910 2911 flags = 0; 2912 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2913 flags |= WPI_TX_AUTO_SEQ; 2914 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2915 flags |= WPI_TX_NEED_ACK; 2916 if (params->ibp_flags & IEEE80211_BPF_RTS) 2917 flags |= WPI_TX_NEED_RTS; 2918 if (params->ibp_flags & IEEE80211_BPF_CTS) 2919 flags |= WPI_TX_NEED_CTS; 2920 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2921 flags |= WPI_TX_FULL_TXOP; 2922 2923 /* Encrypt the frame if need be. */ 2924 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2925 /* Retrieve key for TX. */ 2926 k = ieee80211_crypto_encap(ni, m); 2927 if (k == NULL) { 2928 m_freem(m); 2929 return ENOBUFS; 2930 } 2931 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2932 2933 /* 802.11 header may have moved. */ 2934 wh = mtod(m, struct ieee80211_frame *); 2935 } 2936 totlen = m->m_pkthdr.len; 2937 2938 if (ieee80211_radiotap_active_vap(vap)) { 2939 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2940 2941 tap->wt_flags = 0; 2942 tap->wt_rate = rate; 2943 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2944 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2945 2946 ieee80211_radiotap_tx(vap, m); 2947 } 2948 2949 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2950 if (type == IEEE80211_FC0_TYPE_MGT) { 2951 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2952 2953 /* Tell HW to set timestamp in probe responses. 
*/ 2954 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2955 flags |= WPI_TX_INSERT_TSTAMP; 2956 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2957 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2958 tx->timeout = htole16(3); 2959 else 2960 tx->timeout = htole16(2); 2961 } 2962 2963 if (k != NULL && !swcrypt) { 2964 switch (k->wk_cipher->ic_cipher) { 2965 case IEEE80211_CIPHER_AES_CCM: 2966 tx->security = WPI_CIPHER_CCMP; 2967 break; 2968 2969 default: 2970 break; 2971 } 2972 2973 memcpy(tx->key, k->wk_key, k->wk_keylen); 2974 } 2975 2976 tx->len = htole16(totlen); 2977 tx->flags = htole32(flags); 2978 tx->plcp = rate2plcp(rate); 2979 tx->id = WPI_ID_BROADCAST; 2980 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2981 tx->rts_ntries = params->ibp_try1; 2982 tx->data_ntries = params->ibp_try0; 2983 2984 tx_data.ni = ni; 2985 tx_data.m = m; 2986 tx_data.size = sizeof(struct wpi_cmd_data); 2987 tx_data.code = WPI_CMD_TX_DATA; 2988 tx_data.ac = ac; 2989 2990 return wpi_cmd2(sc, &tx_data); 2991 } 2992 2993 static __inline int 2994 wpi_tx_ring_is_full(struct wpi_softc *sc, int ac) 2995 { 2996 struct wpi_tx_ring *ring = &sc->txq[ac]; 2997 int retval; 2998 2999 WPI_TXQ_STATE_LOCK(sc); 3000 retval = (ring->queued > WPI_TX_RING_HIMARK); 3001 WPI_TXQ_STATE_UNLOCK(sc); 3002 3003 return retval; 3004 } 3005 3006 static __inline void 3007 wpi_handle_tx_failure(struct ieee80211_node *ni) 3008 { 3009 /* NB: m is reclaimed on tx failure */ 3010 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3011 ieee80211_free_node(ni); 3012 } 3013 3014 static int 3015 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3016 const struct ieee80211_bpf_params *params) 3017 { 3018 struct ieee80211com *ic = ni->ni_ic; 3019 struct wpi_softc *sc = ic->ic_softc; 3020 int ac, error = 0; 3021 3022 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3023 3024 ac = M_WME_GETAC(m); 3025 3026 WPI_TX_LOCK(sc); 3027 3028 if (sc->sc_running == 0 || wpi_tx_ring_is_full(sc, ac)) { 3029 m_freem(m); 3030 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3031 goto unlock; 3032 } 3033 3034 if (params == NULL) { 3035 /* 3036 * Legacy path; interpret frame contents to decide 3037 * precisely how to send the frame. 3038 */ 3039 error = wpi_tx_data(sc, m, ni); 3040 } else { 3041 /* 3042 * Caller supplied explicit parameters to use in 3043 * sending the frame. 3044 */ 3045 error = wpi_tx_data_raw(sc, m, ni, params); 3046 } 3047 3048 unlock: WPI_TX_UNLOCK(sc); 3049 3050 if (error != 0) { 3051 wpi_handle_tx_failure(ni); 3052 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3053 3054 return error; 3055 } 3056 3057 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3058 3059 return 0; 3060 } 3061 3062 static int 3063 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3064 { 3065 struct wpi_softc *sc = ic->ic_softc; 3066 struct ieee80211_node *ni; 3067 struct mbufq *sndq; 3068 int ac, error; 3069 3070 WPI_TX_LOCK(sc); 3071 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3072 3073 /* Check if interface is up & running. */ 3074 if (sc->sc_running == 0) { 3075 error = ENXIO; 3076 goto unlock; 3077 } 3078 3079 /* Check for available space. */ 3080 ac = M_WME_GETAC(m); 3081 sndq = &sc->txq[ac].snd; 3082 if (wpi_tx_ring_is_full(sc, ac) || mbufq_len(sndq) != 0) { 3083 /* wpi_tx_done() will dequeue it. 
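 * (More precisely, wpi_tx_done() schedules sc_start_task and
 * wpi_start() drains this queue once the ring has room again.)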
*/ 3084 error = mbufq_enqueue(sndq, m); 3085 goto unlock; 3086 } 3087 3088 error = 0; 3089 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3090 if (wpi_tx_data(sc, m, ni) != 0) { 3091 wpi_handle_tx_failure(ni); 3092 } 3093 3094 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3095 3096 unlock: WPI_TX_UNLOCK(sc); 3097 3098 return (error); 3099 } 3100 3101 /** 3102 * Process data waiting to be sent on the output queue 3103 */ 3104 static void 3105 wpi_start(void *arg0, int pending) 3106 { 3107 struct wpi_softc *sc = arg0; 3108 struct ieee80211_node *ni; 3109 struct mbuf *m; 3110 uint8_t i; 3111 3112 WPI_TX_LOCK(sc); 3113 if (sc->sc_running == 0) 3114 goto unlock; 3115 3116 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3117 3118 for (i = 0; i < WPI_CMD_QUEUE_NUM; i++) { 3119 struct mbufq *sndq = &sc->txq[i].snd; 3120 3121 for (;;) { 3122 if (wpi_tx_ring_is_full(sc, i)) 3123 break; 3124 3125 if ((m = mbufq_dequeue(sndq)) == NULL) 3126 break; 3127 3128 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3129 if (wpi_tx_data(sc, m, ni) != 0) { 3130 wpi_handle_tx_failure(ni); 3131 } 3132 } 3133 } 3134 3135 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3136 unlock: WPI_TX_UNLOCK(sc); 3137 } 3138 3139 static void 3140 wpi_watchdog_rfkill(void *arg) 3141 { 3142 struct wpi_softc *sc = arg; 3143 struct ieee80211com *ic = &sc->sc_ic; 3144 3145 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3146 3147 /* No need to lock firmware memory. */ 3148 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3149 /* Radio kill switch is still off. */ 3150 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3151 sc); 3152 } else 3153 ieee80211_runtask(ic, &sc->sc_radioon_task); 3154 } 3155 3156 static void 3157 wpi_scan_timeout(void *arg) 3158 { 3159 struct wpi_softc *sc = arg; 3160 struct ieee80211com *ic = &sc->sc_ic; 3161 3162 ic_printf(ic, "scan timeout\n"); 3163 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3164 } 3165 3166 static void 3167 wpi_tx_timeout(void *arg) 3168 { 3169 struct wpi_softc *sc = arg; 3170 struct ieee80211com *ic = &sc->sc_ic; 3171 3172 ic_printf(ic, "device timeout\n"); 3173 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3174 } 3175 3176 static void 3177 wpi_parent(struct ieee80211com *ic) 3178 { 3179 struct wpi_softc *sc = ic->ic_softc; 3180 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3181 3182 if (ic->ic_nrunning > 0) { 3183 if (wpi_init(sc) == 0) { 3184 ieee80211_notify_radio(ic, 1); 3185 ieee80211_start_all(ic); 3186 } else { 3187 ieee80211_notify_radio(ic, 0); 3188 ieee80211_stop(vap); 3189 } 3190 } else 3191 wpi_stop(sc); 3192 } 3193 3194 /* 3195 * Send a command to the firmware. 
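 * Commands travel through the dedicated command ring.  Payloads that do
 * not fit in the pre-mapped ring slot are bounced through a jumbo mbuf.
 * A synchronous caller (async == 0) sleeps on the command slot and is
 * woken by wpi_cmd_done() when the firmware acknowledges the command.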
3196 */ 3197 static int 3198 wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3199 int async) 3200 { 3201 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3202 struct wpi_tx_desc *desc; 3203 struct wpi_tx_data *data; 3204 struct wpi_tx_cmd *cmd; 3205 struct mbuf *m; 3206 bus_addr_t paddr; 3207 int totlen, error; 3208 3209 WPI_TXQ_LOCK(sc); 3210 3211 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3212 3213 if (sc->sc_running == 0) { 3214 /* wpi_stop() was called */ 3215 if (code == WPI_CMD_SCAN) 3216 error = ENETDOWN; 3217 else 3218 error = 0; 3219 3220 goto fail; 3221 } 3222 3223 if (async == 0) 3224 WPI_LOCK_ASSERT(sc); 3225 3226 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3227 __func__, wpi_cmd_str(code), size, async); 3228 3229 desc = &ring->desc[ring->cur]; 3230 data = &ring->data[ring->cur]; 3231 totlen = 4 + size; 3232 3233 if (size > sizeof cmd->data) { 3234 /* Command is too large to fit in a descriptor. */ 3235 if (totlen > MCLBYTES) { 3236 error = EINVAL; 3237 goto fail; 3238 } 3239 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3240 if (m == NULL) { 3241 error = ENOMEM; 3242 goto fail; 3243 } 3244 cmd = mtod(m, struct wpi_tx_cmd *); 3245 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3246 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3247 if (error != 0) { 3248 m_freem(m); 3249 goto fail; 3250 } 3251 data->m = m; 3252 } else { 3253 cmd = &ring->cmd[ring->cur]; 3254 paddr = data->cmd_paddr; 3255 } 3256 3257 cmd->code = code; 3258 cmd->flags = 0; 3259 cmd->qid = ring->qid; 3260 cmd->idx = ring->cur; 3261 memcpy(cmd->data, buf, size); 3262 3263 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3264 desc->segs[0].addr = htole32(paddr); 3265 desc->segs[0].len = htole32(totlen); 3266 3267 if (size > sizeof cmd->data) { 3268 bus_dmamap_sync(ring->data_dmat, data->map, 3269 BUS_DMASYNC_PREWRITE); 3270 } else { 3271 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3272 BUS_DMASYNC_PREWRITE); 3273 } 3274 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3275 BUS_DMASYNC_PREWRITE); 3276 3277 /* Kick command ring. */ 3278 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3279 sc->sc_update_tx_ring(sc, ring); 3280 3281 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3282 3283 WPI_TXQ_UNLOCK(sc); 3284 3285 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3286 3287 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3288 3289 WPI_TXQ_UNLOCK(sc); 3290 3291 return error; 3292 } 3293 3294 /* 3295 * Configure HW multi-rate retries. 3296 */ 3297 static int 3298 wpi_mrr_setup(struct wpi_softc *sc) 3299 { 3300 struct ieee80211com *ic = &sc->sc_ic; 3301 struct wpi_mrr_setup mrr; 3302 int i, error; 3303 3304 /* CCK rates (not used with 802.11a). */ 3305 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3306 mrr.rates[i].flags = 0; 3307 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3308 /* Fallback to the immediate lower CCK rate (if any.) */ 3309 mrr.rates[i].next = 3310 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3311 /* Try twice at this rate before falling back to "next". */ 3312 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3313 } 3314 /* OFDM rates (not used with 802.11b). */ 3315 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3316 mrr.rates[i].flags = 0; 3317 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3318 /* Fallback to the immediate lower rate (if any.) */ 3319 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3320 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 
3321 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3322 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3323 i - 1; 3324 /* Try twice at this rate before falling back to "next". */ 3325 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3326 } 3327 /* Setup MRR for control frames. */ 3328 mrr.which = htole32(WPI_MRR_CTL); 3329 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3330 if (error != 0) { 3331 device_printf(sc->sc_dev, 3332 "could not setup MRR for control frames\n"); 3333 return error; 3334 } 3335 /* Setup MRR for data frames. */ 3336 mrr.which = htole32(WPI_MRR_DATA); 3337 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3338 if (error != 0) { 3339 device_printf(sc->sc_dev, 3340 "could not setup MRR for data frames\n"); 3341 return error; 3342 } 3343 return 0; 3344 } 3345 3346 static int 3347 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3348 { 3349 struct ieee80211com *ic = ni->ni_ic; 3350 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3351 struct wpi_node *wn = WPI_NODE(ni); 3352 struct wpi_node_info node; 3353 int error; 3354 3355 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3356 3357 if (wn->id == WPI_ID_UNDEFINED) 3358 return EINVAL; 3359 3360 memset(&node, 0, sizeof node); 3361 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3362 node.id = wn->id; 3363 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3364 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3365 node.action = htole32(WPI_ACTION_SET_RATE); 3366 node.antenna = WPI_ANTENNA_BOTH; 3367 3368 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3369 wn->id, ether_sprintf(ni->ni_macaddr)); 3370 3371 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3372 if (error != 0) { 3373 device_printf(sc->sc_dev, 3374 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3375 error); 3376 return error; 3377 } 3378 3379 if (wvp->wv_gtk != 0) { 3380 error = wpi_set_global_keys(ni); 3381 if (error != 0) { 3382 device_printf(sc->sc_dev, 3383 "%s: error while setting global keys\n", __func__); 3384 return ENXIO; 3385 } 3386 } 3387 3388 return 0; 3389 } 3390 3391 /* 3392 * Broadcast node is used to send group-addressed and management frames. 3393 */ 3394 static int 3395 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3396 { 3397 struct ieee80211com *ic = &sc->sc_ic; 3398 struct wpi_node_info node; 3399 3400 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3401 3402 memset(&node, 0, sizeof node); 3403 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3404 node.id = WPI_ID_BROADCAST; 3405 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3406 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3407 node.action = htole32(WPI_ACTION_SET_RATE); 3408 node.antenna = WPI_ANTENNA_BOTH; 3409 3410 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3411 3412 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3413 } 3414 3415 static int 3416 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3417 { 3418 struct wpi_node *wn = WPI_NODE(ni); 3419 int error; 3420 3421 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3422 3423 wn->id = wpi_add_node_entry_sta(sc); 3424 3425 if ((error = wpi_add_node(sc, ni)) != 0) { 3426 wpi_del_node_entry(sc, wn->id); 3427 wn->id = WPI_ID_UNDEFINED; 3428 return error; 3429 } 3430 3431 return 0; 3432 } 3433 3434 static int 3435 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3436 { 3437 struct wpi_node *wn = WPI_NODE(ni); 3438 int error; 3439 3440 KASSERT(wn->id == WPI_ID_UNDEFINED, 3441 ("the node %d was added before", wn->id)); 3442 3443 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3444 3445 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3446 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3447 return ENOMEM; 3448 } 3449 3450 if ((error = wpi_add_node(sc, ni)) != 0) { 3451 wpi_del_node_entry(sc, wn->id); 3452 wn->id = WPI_ID_UNDEFINED; 3453 return error; 3454 } 3455 3456 return 0; 3457 } 3458 3459 static void 3460 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3461 { 3462 struct wpi_node *wn = WPI_NODE(ni); 3463 struct wpi_cmd_del_node node; 3464 int error; 3465 3466 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3467 3468 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3469 3470 memset(&node, 0, sizeof node); 3471 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3472 node.count = 1; 3473 3474 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3475 wn->id, ether_sprintf(ni->ni_macaddr)); 3476 3477 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3478 if (error != 0) { 3479 device_printf(sc->sc_dev, 3480 "%s: could not delete node %u, error %d\n", __func__, 3481 wn->id, error); 3482 } 3483 } 3484 3485 static int 3486 wpi_updateedca(struct ieee80211com *ic) 3487 { 3488 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3489 struct wpi_softc *sc = ic->ic_softc; 3490 struct wpi_edca_params cmd; 3491 int aci, error; 3492 3493 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3494 3495 memset(&cmd, 0, sizeof cmd); 3496 cmd.flags = htole32(WPI_EDCA_UPDATE); 3497 for (aci = 0; aci < WME_NUM_AC; aci++) { 3498 const struct wmeParams *ac = 3499 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3500 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3501 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3502 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3503 cmd.ac[aci].txoplimit = 3504 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3505 3506 DPRINTF(sc, WPI_DEBUG_EDCA, 3507 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3508 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3509 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3510 cmd.ac[aci].txoplimit); 3511 } 3512 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3513 3514 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3515 3516 return error; 3517 #undef WPI_EXP2 3518 } 3519 3520 static void 3521 wpi_set_promisc(struct wpi_softc *sc) 3522 { 3523 struct ieee80211com *ic = &sc->sc_ic; 3524 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3525 uint32_t promisc_filter; 3526 3527 promisc_filter = WPI_FILTER_CTL; 3528 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3529 promisc_filter |= WPI_FILTER_PROMISC; 3530 3531 if (ic->ic_promisc > 0) 3532 sc->rxon.filter |= htole32(promisc_filter); 3533 else 3534 sc->rxon.filter &= ~htole32(promisc_filter); 3535 } 3536 3537 static void 3538 wpi_update_promisc(struct ieee80211com *ic) 3539 { 3540 struct wpi_softc *sc = ic->ic_softc; 3541 3542 WPI_RXON_LOCK(sc); 3543 wpi_set_promisc(sc); 3544 3545 if (wpi_send_rxon(sc, 1, 1) != 0) { 3546 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3547 __func__); 3548 } 3549 WPI_RXON_UNLOCK(sc); 3550 } 3551 3552 static void 3553 wpi_update_mcast(struct ieee80211com *ic) 3554 { 3555 /* Ignore */ 3556 } 3557 3558 static void 3559 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3560 { 3561 struct wpi_cmd_led led; 3562 3563 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3564 3565 led.which = which; 3566 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3567 led.off = off; 3568 led.on = on; 3569 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3570 } 3571 3572 static int 3573 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3574 { 3575 struct wpi_cmd_timing cmd; 3576 uint64_t val, mod; 3577 3578 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3579 3580 memset(&cmd, 0, sizeof cmd); 3581 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3582 cmd.bintval = htole16(ni->ni_intval); 3583 cmd.lintval = htole16(10); 3584 3585 /* Compute remaining time until next beacon. */ 3586 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3587 mod = le64toh(cmd.tstamp) % val; 3588 cmd.binitval = htole32((uint32_t)(val - mod)); 3589 3590 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3591 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3592 3593 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3594 } 3595 3596 /* 3597 * This function is called periodically (every 60 seconds) to adjust output 3598 * power to temperature changes. 3599 */ 3600 static void 3601 wpi_power_calibration(struct wpi_softc *sc) 3602 { 3603 int temp; 3604 3605 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3606 3607 /* Update sensor data. */ 3608 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3609 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3610 3611 /* Sanity-check read value. */ 3612 if (temp < -260 || temp > 25) { 3613 /* This can't be correct, ignore. */ 3614 DPRINTF(sc, WPI_DEBUG_TEMP, 3615 "out-of-range temperature reported: %d\n", temp); 3616 return; 3617 } 3618 3619 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3620 3621 /* Adjust Tx power if need be. */ 3622 if (abs(temp - sc->temp) <= 6) 3623 return; 3624 3625 sc->temp = temp; 3626 3627 if (wpi_set_txpower(sc, 1) != 0) { 3628 /* just warn, too bad for the automatic calibration... */ 3629 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3630 } 3631 } 3632 3633 /* 3634 * Set TX power for current channel. 3635 */ 3636 static int 3637 wpi_set_txpower(struct wpi_softc *sc, int async) 3638 { 3639 struct wpi_power_group *group; 3640 struct wpi_cmd_txpower cmd; 3641 uint8_t chan; 3642 int idx, is_chan_5ghz, i; 3643 3644 /* Retrieve current channel from last RXON. 
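 * The power groups (one for 2GHz, three covering the 5GHz band) and the
 * per-channel maximum used below come from the EEPROM calibration data
 * (see wpi_get_power_index()).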
*/ 3645 chan = sc->rxon.chan; 3646 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3647 3648 /* Find the TX power group to which this channel belongs. */ 3649 if (is_chan_5ghz) { 3650 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3651 if (chan <= group->chan) 3652 break; 3653 } else 3654 group = &sc->groups[0]; 3655 3656 memset(&cmd, 0, sizeof cmd); 3657 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3658 cmd.chan = htole16(chan); 3659 3660 /* Set TX power for all OFDM and CCK rates. */ 3661 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3662 /* Retrieve TX power for this channel/rate. */ 3663 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3664 3665 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3666 3667 if (is_chan_5ghz) { 3668 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3669 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3670 } else { 3671 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3672 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3673 } 3674 DPRINTF(sc, WPI_DEBUG_TEMP, 3675 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3676 } 3677 3678 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3679 } 3680 3681 /* 3682 * Determine Tx power index for a given channel/rate combination. 3683 * This takes into account the regulatory information from EEPROM and the 3684 * current temperature. 3685 */ 3686 static int 3687 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3688 uint8_t chan, int is_chan_5ghz, int ridx) 3689 { 3690 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3691 #define fdivround(a, b, n) \ 3692 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3693 3694 /* Linear interpolation. */ 3695 #define interpolate(x, x1, y1, x2, y2, n) \ 3696 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3697 3698 struct wpi_power_sample *sample; 3699 int pwr, idx; 3700 3701 /* Default TX power is group maximum TX power minus 3dB. */ 3702 pwr = group->maxpwr / 2; 3703 3704 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3705 switch (ridx) { 3706 case WPI_RIDX_OFDM36: 3707 pwr -= is_chan_5ghz ? 5 : 0; 3708 break; 3709 case WPI_RIDX_OFDM48: 3710 pwr -= is_chan_5ghz ? 10 : 7; 3711 break; 3712 case WPI_RIDX_OFDM54: 3713 pwr -= is_chan_5ghz ? 12 : 9; 3714 break; 3715 } 3716 3717 /* Never exceed the channel maximum allowed TX power. */ 3718 pwr = min(pwr, sc->maxpwr[chan]); 3719 3720 /* Retrieve TX power index into gain tables from samples. */ 3721 for (sample = group->samples; sample < &group->samples[3]; sample++) 3722 if (pwr > sample[1].power) 3723 break; 3724 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3725 idx = interpolate(pwr, sample[0].power, sample[0].index, 3726 sample[1].power, sample[1].index, 19); 3727 3728 /*- 3729 * Adjust power index based on current temperature: 3730 * - if cooler than factory-calibrated: decrease output power 3731 * - if warmer than factory-calibrated: increase output power 3732 */ 3733 idx -= (sc->temp - group->temp) * 11 / 100; 3734 3735 /* Decrease TX power for CCK rates (-5dB). */ 3736 if (ridx >= WPI_RIDX_CCK1) 3737 idx += 10; 3738 3739 /* Make sure idx stays in a valid range. */ 3740 if (idx < 0) 3741 return 0; 3742 if (idx > WPI_MAX_PWR_INDEX) 3743 return WPI_MAX_PWR_INDEX; 3744 return idx; 3745 3746 #undef interpolate 3747 #undef fdivround 3748 } 3749 3750 /* 3751 * Set STA mode power saving level (between 0 and 5). 
3752 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3753 */ 3754 static int 3755 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3756 { 3757 struct wpi_pmgt_cmd cmd; 3758 const struct wpi_pmgt *pmgt; 3759 uint32_t max, skip_dtim; 3760 uint32_t reg; 3761 int i; 3762 3763 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3764 "%s: dtim=%d, level=%d, async=%d\n", 3765 __func__, dtim, level, async); 3766 3767 /* Select which PS parameters to use. */ 3768 if (dtim <= 10) 3769 pmgt = &wpi_pmgt[0][level]; 3770 else 3771 pmgt = &wpi_pmgt[1][level]; 3772 3773 memset(&cmd, 0, sizeof cmd); 3774 WPI_TXQ_LOCK(sc); 3775 if (level != 0) { /* not CAM */ 3776 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3777 sc->sc_flags |= WPI_PS_PATH; 3778 } else 3779 sc->sc_flags &= ~WPI_PS_PATH; 3780 WPI_TXQ_UNLOCK(sc); 3781 /* Retrieve PCIe Active State Power Management (ASPM). */ 3782 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3783 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3784 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3785 3786 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3787 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3788 3789 if (dtim == 0) { 3790 dtim = 1; 3791 skip_dtim = 0; 3792 } else 3793 skip_dtim = pmgt->skip_dtim; 3794 3795 if (skip_dtim != 0) { 3796 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3797 max = pmgt->intval[4]; 3798 if (max == (uint32_t)-1) 3799 max = dtim * (skip_dtim + 1); 3800 else if (max > dtim) 3801 max = (max / dtim) * dtim; 3802 } else 3803 max = dtim; 3804 3805 for (i = 0; i < 5; i++) 3806 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3807 3808 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3809 } 3810 3811 static int 3812 wpi_send_btcoex(struct wpi_softc *sc) 3813 { 3814 struct wpi_bluetooth cmd; 3815 3816 memset(&cmd, 0, sizeof cmd); 3817 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3818 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3819 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3820 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3821 __func__); 3822 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3823 } 3824 3825 static int 3826 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3827 { 3828 int error; 3829 3830 if (async) 3831 WPI_RXON_LOCK_ASSERT(sc); 3832 3833 if (assoc && wpi_check_bss_filter(sc) != 0) { 3834 struct wpi_assoc rxon_assoc; 3835 3836 rxon_assoc.flags = sc->rxon.flags; 3837 rxon_assoc.filter = sc->rxon.filter; 3838 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3839 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3840 rxon_assoc.reserved = 0; 3841 3842 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3843 sizeof (struct wpi_assoc), async); 3844 if (error != 0) { 3845 device_printf(sc->sc_dev, 3846 "RXON_ASSOC command failed, error %d\n", error); 3847 return error; 3848 } 3849 } else { 3850 if (async) { 3851 WPI_NT_LOCK(sc); 3852 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3853 sizeof (struct wpi_rxon), async); 3854 if (error == 0) 3855 wpi_clear_node_table(sc); 3856 WPI_NT_UNLOCK(sc); 3857 } else { 3858 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3859 sizeof (struct wpi_rxon), async); 3860 if (error == 0) 3861 wpi_clear_node_table(sc); 3862 } 3863 3864 if (error != 0) { 3865 device_printf(sc->sc_dev, 3866 "RXON command failed, error %d\n", error); 3867 return error; 3868 } 3869 3870 /* Add broadcast node. 
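 * Sending a full RXON (unlike RXON_ASSOC) invalidates the node table,
 * which is cleared above, so the broadcast entry must be re-added
 * before any group-addressed or management frame can be transmitted.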
*/ 3871 error = wpi_add_broadcast_node(sc, async); 3872 if (error != 0) { 3873 device_printf(sc->sc_dev, 3874 "could not add broadcast node, error %d\n", error); 3875 return error; 3876 } 3877 } 3878 3879 /* Configuration has changed, set Tx power accordingly. */ 3880 if ((error = wpi_set_txpower(sc, async)) != 0) { 3881 device_printf(sc->sc_dev, 3882 "%s: could not set TX power, error %d\n", __func__, error); 3883 return error; 3884 } 3885 3886 return 0; 3887 } 3888 3889 /** 3890 * Configure the card to listen to a particular channel, this transisions the 3891 * card in to being able to receive frames from remote devices. 3892 */ 3893 static int 3894 wpi_config(struct wpi_softc *sc) 3895 { 3896 struct ieee80211com *ic = &sc->sc_ic; 3897 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3898 struct ieee80211_channel *c = ic->ic_curchan; 3899 int error; 3900 3901 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3902 3903 /* Set power saving level to CAM during initialization. */ 3904 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3905 device_printf(sc->sc_dev, 3906 "%s: could not set power saving level\n", __func__); 3907 return error; 3908 } 3909 3910 /* Configure bluetooth coexistence. */ 3911 if ((error = wpi_send_btcoex(sc)) != 0) { 3912 device_printf(sc->sc_dev, 3913 "could not configure bluetooth coexistence\n"); 3914 return error; 3915 } 3916 3917 /* Configure adapter. */ 3918 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3919 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3920 3921 /* Set default channel. */ 3922 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3923 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3924 if (IEEE80211_IS_CHAN_2GHZ(c)) 3925 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3926 3927 sc->rxon.filter = WPI_FILTER_MULTICAST; 3928 switch (ic->ic_opmode) { 3929 case IEEE80211_M_STA: 3930 sc->rxon.mode = WPI_MODE_STA; 3931 break; 3932 case IEEE80211_M_IBSS: 3933 sc->rxon.mode = WPI_MODE_IBSS; 3934 sc->rxon.filter |= WPI_FILTER_BEACON; 3935 break; 3936 case IEEE80211_M_HOSTAP: 3937 /* XXX workaround for beaconing */ 3938 sc->rxon.mode = WPI_MODE_IBSS; 3939 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3940 break; 3941 case IEEE80211_M_AHDEMO: 3942 sc->rxon.mode = WPI_MODE_HOSTAP; 3943 break; 3944 case IEEE80211_M_MONITOR: 3945 sc->rxon.mode = WPI_MODE_MONITOR; 3946 break; 3947 default: 3948 device_printf(sc->sc_dev, "unknown opmode %d\n", 3949 ic->ic_opmode); 3950 return EINVAL; 3951 } 3952 sc->rxon.filter = htole32(sc->rxon.filter); 3953 wpi_set_promisc(sc); 3954 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3955 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3956 3957 /* XXX Current configuration may be unusable. */ 3958 if (IEEE80211_IS_CHAN_NOADHOC(c) && sc->rxon.mode == WPI_MODE_IBSS) { 3959 device_printf(sc->sc_dev, 3960 "%s: invalid channel (%d) selected for IBSS mode\n", 3961 __func__, ieee80211_chan2ieee(ic, c)); 3962 return EINVAL; 3963 } 3964 3965 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3966 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3967 __func__); 3968 return error; 3969 } 3970 3971 /* Setup rate scalling. 
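 * i.e. the firmware multi-rate-retry tables built by wpi_mrr_setup():
 * in 11b/g mode, for example, a frame sent at OFDM54 falls back through
 * OFDM48 ... OFDM6 and finally CCK2, with WPI_NTRIES_DEFAULT attempts
 * at each step.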
*/ 3972 if ((error = wpi_mrr_setup(sc)) != 0) { 3973 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3974 error); 3975 return error; 3976 } 3977 3978 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3979 3980 return 0; 3981 } 3982 3983 static uint16_t 3984 wpi_get_active_dwell_time(struct wpi_softc *sc, 3985 struct ieee80211_channel *c, uint8_t n_probes) 3986 { 3987 /* No channel? Default to 2GHz settings. */ 3988 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3989 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3990 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3991 } 3992 3993 /* 5GHz dwell time. */ 3994 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 3995 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 3996 } 3997 3998 /* 3999 * Limit the total dwell time. 4000 * 4001 * Returns the dwell time in milliseconds. 4002 */ 4003 static uint16_t 4004 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4005 { 4006 struct ieee80211com *ic = &sc->sc_ic; 4007 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4008 int bintval = 0; 4009 4010 /* bintval is in TU (1.024mS) */ 4011 if (vap != NULL) 4012 bintval = vap->iv_bss->ni_intval; 4013 4014 /* 4015 * If it's non-zero, we should calculate the minimum of 4016 * it and the DWELL_BASE. 4017 * 4018 * XXX Yes, the math should take into account that bintval 4019 * is 1.024mS, not 1mS.. 4020 */ 4021 if (bintval > 0) { 4022 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4023 bintval); 4024 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4025 } 4026 4027 /* No association context? Default. */ 4028 return dwell_time; 4029 } 4030 4031 static uint16_t 4032 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4033 { 4034 uint16_t passive; 4035 4036 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4037 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4038 else 4039 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4040 4041 /* Clamp to the beacon interval if we're associated. */ 4042 return (wpi_limit_dwell(sc, passive)); 4043 } 4044 4045 static uint32_t 4046 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4047 { 4048 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4049 uint32_t nbeacons = time / bintval; 4050 4051 if (mod > WPI_PAUSE_MAX_TIME) 4052 mod = WPI_PAUSE_MAX_TIME; 4053 4054 return WPI_PAUSE_SCAN(nbeacons, mod); 4055 } 4056 4057 /* 4058 * Send a scan request to the firmware. 4059 */ 4060 static int 4061 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4062 { 4063 struct ieee80211com *ic = &sc->sc_ic; 4064 struct ieee80211_scan_state *ss = ic->ic_scan; 4065 struct ieee80211vap *vap = ss->ss_vap; 4066 struct wpi_scan_hdr *hdr; 4067 struct wpi_cmd_data *tx; 4068 struct wpi_scan_essid *essids; 4069 struct wpi_scan_chan *chan; 4070 struct ieee80211_frame *wh; 4071 struct ieee80211_rateset *rs; 4072 uint16_t dwell_active, dwell_passive; 4073 uint8_t *buf, *frm; 4074 int bgscan, bintval, buflen, error, i, nssid; 4075 4076 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4077 4078 /* 4079 * We are absolutely not allowed to send a scan command when another 4080 * scan command is pending. 
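* The scan_timeout callout armed at the end of this function doubles as the scan-in-progress marker that callout_pending() tests below.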
4081 */ 4082 if (callout_pending(&sc->scan_timeout)) { 4083 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4084 __func__); 4085 error = EAGAIN; 4086 goto fail; 4087 } 4088 4089 bgscan = wpi_check_bss_filter(sc); 4090 bintval = vap->iv_bss->ni_intval; 4091 if (bgscan != 0 && 4092 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4093 error = EOPNOTSUPP; 4094 goto fail; 4095 } 4096 4097 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4098 if (buf == NULL) { 4099 device_printf(sc->sc_dev, 4100 "%s: could not allocate buffer for scan command\n", 4101 __func__); 4102 error = ENOMEM; 4103 goto fail; 4104 } 4105 hdr = (struct wpi_scan_hdr *)buf; 4106 4107 /* 4108 * Move to the next channel if no packets are received within 10 msecs 4109 * after sending the probe request. 4110 */ 4111 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4112 hdr->quiet_threshold = htole16(1); 4113 4114 if (bgscan != 0) { 4115 /* 4116 * Max needs to be greater than active and passive and quiet! 4117 * It's also in microseconds! 4118 */ 4119 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4120 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4121 bintval)); 4122 } 4123 4124 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4125 4126 tx = (struct wpi_cmd_data *)(hdr + 1); 4127 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4128 tx->id = WPI_ID_BROADCAST; 4129 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4130 4131 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4132 /* Send probe requests at 6Mbps. */ 4133 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4134 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4135 } else { 4136 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4137 /* Send probe requests at 1Mbps. */ 4138 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4139 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4140 } 4141 4142 essids = (struct wpi_scan_essid *)(tx + 1); 4143 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4144 for (i = 0; i < nssid; i++) { 4145 essids[i].id = IEEE80211_ELEMID_SSID; 4146 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4147 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4148 #ifdef WPI_DEBUG 4149 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4150 printf("Scanning Essid: "); 4151 ieee80211_print_essid(essids[i].data, essids[i].len); 4152 printf("\n"); 4153 } 4154 #endif 4155 } 4156 4157 /* 4158 * Build a probe request frame. Most of the following code is a 4159 * copy & paste of what is done in net80211. 4160 */ 4161 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4162 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4163 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4164 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4165 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4166 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4167 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4168 4169 frm = (uint8_t *)(wh + 1); 4170 frm = ieee80211_add_ssid(frm, NULL, 0); 4171 frm = ieee80211_add_rates(frm, rs); 4172 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4173 frm = ieee80211_add_xrates(frm, rs); 4174 4175 /* Set length of probe request. */ 4176 tx->len = htole16(frm - (uint8_t *)wh); 4177 4178 /* 4179 * Construct information about the channel that we 4180 * want to scan. 
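Only one channel entry is built per call; a duplicate entry is appended below when the channel being scanned is the one currently set in RXON, to force a probe request out.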
The firmware expects this to be directly 4181 * after the scan probe request 4182 */ 4183 chan = (struct wpi_scan_chan *)frm; 4184 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4185 chan->flags = 0; 4186 if (nssid) { 4187 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4188 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4189 } else 4190 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4191 4192 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4193 chan->flags |= WPI_CHAN_ACTIVE; 4194 4195 /* 4196 * Calculate the active/passive dwell times. 4197 */ 4198 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4199 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4200 4201 /* Make sure they're valid. */ 4202 if (dwell_active > dwell_passive) 4203 dwell_active = dwell_passive; 4204 4205 chan->active = htole16(dwell_active); 4206 chan->passive = htole16(dwell_passive); 4207 4208 chan->dsp_gain = 0x6e; /* Default level */ 4209 4210 if (IEEE80211_IS_CHAN_5GHZ(c)) 4211 chan->rf_gain = 0x3b; 4212 else 4213 chan->rf_gain = 0x28; 4214 4215 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4216 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4217 4218 hdr->nchan++; 4219 4220 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4221 /* XXX Force probe request transmission. */ 4222 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4223 4224 chan++; 4225 4226 /* Reduce unnecessary delay. */ 4227 chan->flags = 0; 4228 chan->passive = chan->active = hdr->quiet_time; 4229 4230 hdr->nchan++; 4231 } 4232 4233 chan++; 4234 4235 buflen = (uint8_t *)chan - buf; 4236 hdr->len = htole16(buflen); 4237 4238 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4239 hdr->nchan); 4240 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4241 free(buf, M_DEVBUF); 4242 4243 if (error != 0) 4244 goto fail; 4245 4246 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4247 4248 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4249 4250 return 0; 4251 4252 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4253 4254 return error; 4255 } 4256 4257 static int 4258 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4259 { 4260 struct ieee80211com *ic = vap->iv_ic; 4261 struct ieee80211_node *ni = vap->iv_bss; 4262 struct ieee80211_channel *c = ni->ni_chan; 4263 int error; 4264 4265 WPI_RXON_LOCK(sc); 4266 4267 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4268 4269 /* Update adapter configuration. */ 4270 sc->rxon.associd = 0; 4271 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4272 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4273 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4274 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4275 if (IEEE80211_IS_CHAN_2GHZ(c)) 4276 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4277 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4278 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4279 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4280 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4281 if (IEEE80211_IS_CHAN_A(c)) { 4282 sc->rxon.cck_mask = 0; 4283 sc->rxon.ofdm_mask = 0x15; 4284 } else if (IEEE80211_IS_CHAN_B(c)) { 4285 sc->rxon.cck_mask = 0x03; 4286 sc->rxon.ofdm_mask = 0; 4287 } else { 4288 /* Assume 802.11b/g. 
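That is, enable all four CCK rates and what appear to be the mandatory OFDM rates (0x15: 6, 12 and 24 Mbit/s) until the real rate set is negotiated.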
*/ 4289 sc->rxon.cck_mask = 0x0f; 4290 sc->rxon.ofdm_mask = 0x15; 4291 } 4292 4293 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4294 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4295 sc->rxon.ofdm_mask); 4296 4297 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4298 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4299 __func__); 4300 } 4301 4302 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4303 4304 WPI_RXON_UNLOCK(sc); 4305 4306 return error; 4307 } 4308 4309 static int 4310 wpi_config_beacon(struct wpi_vap *wvp) 4311 { 4312 struct ieee80211vap *vap = &wvp->wv_vap; 4313 struct ieee80211com *ic = vap->iv_ic; 4314 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4315 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4316 struct wpi_softc *sc = ic->ic_softc; 4317 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4318 struct ieee80211_tim_ie *tie; 4319 struct mbuf *m; 4320 uint8_t *ptr; 4321 int error; 4322 4323 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4324 4325 WPI_VAP_LOCK_ASSERT(wvp); 4326 4327 cmd->len = htole16(bcn->m->m_pkthdr.len); 4328 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4329 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4330 4331 /* XXX seems to be unused */ 4332 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4333 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4334 ptr = mtod(bcn->m, uint8_t *); 4335 4336 cmd->tim = htole16(bo->bo_tim - ptr); 4337 cmd->timsz = tie->tim_len; 4338 } 4339 4340 /* Necessary for recursion in ieee80211_beacon_update(). */ 4341 m = bcn->m; 4342 bcn->m = m_dup(m, M_NOWAIT); 4343 if (bcn->m == NULL) { 4344 device_printf(sc->sc_dev, 4345 "%s: could not copy beacon frame\n", __func__); 4346 error = ENOMEM; 4347 goto end; 4348 } 4349 4350 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4351 device_printf(sc->sc_dev, 4352 "%s: could not update beacon frame, error %d", __func__, 4353 error); 4354 } 4355 4356 /* Restore mbuf. 
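The duplicate handed to wpi_cmd2() is presumably consumed by the Tx path, so the original beacon frame is put back into wv_bcbuf for later updates.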
*/ 4357 end: bcn->m = m; 4358 4359 return error; 4360 } 4361 4362 static int 4363 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4364 { 4365 struct ieee80211vap *vap = ni->ni_vap; 4366 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4367 struct wpi_vap *wvp = WPI_VAP(vap); 4368 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4369 struct mbuf *m; 4370 int error; 4371 4372 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4373 4374 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4375 return EINVAL; 4376 4377 m = ieee80211_beacon_alloc(ni, bo); 4378 if (m == NULL) { 4379 device_printf(sc->sc_dev, 4380 "%s: could not allocate beacon frame\n", __func__); 4381 return ENOMEM; 4382 } 4383 4384 WPI_VAP_LOCK(wvp); 4385 if (bcn->m != NULL) 4386 m_freem(bcn->m); 4387 4388 bcn->m = m; 4389 4390 error = wpi_config_beacon(wvp); 4391 WPI_VAP_UNLOCK(wvp); 4392 4393 return error; 4394 } 4395 4396 static void 4397 wpi_update_beacon(struct ieee80211vap *vap, int item) 4398 { 4399 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4400 struct wpi_vap *wvp = WPI_VAP(vap); 4401 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4402 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4403 struct ieee80211_node *ni = vap->iv_bss; 4404 int mcast = 0; 4405 4406 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4407 4408 WPI_VAP_LOCK(wvp); 4409 if (bcn->m == NULL) { 4410 bcn->m = ieee80211_beacon_alloc(ni, bo); 4411 if (bcn->m == NULL) { 4412 device_printf(sc->sc_dev, 4413 "%s: could not allocate beacon frame\n", __func__); 4414 4415 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4416 __func__); 4417 4418 WPI_VAP_UNLOCK(wvp); 4419 return; 4420 } 4421 } 4422 WPI_VAP_UNLOCK(wvp); 4423 4424 if (item == IEEE80211_BEACON_TIM) 4425 mcast = 1; /* TODO */ 4426 4427 setbit(bo->bo_flags, item); 4428 ieee80211_beacon_update(ni, bo, bcn->m, mcast); 4429 4430 WPI_VAP_LOCK(wvp); 4431 wpi_config_beacon(wvp); 4432 WPI_VAP_UNLOCK(wvp); 4433 4434 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4435 } 4436 4437 static void 4438 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4439 { 4440 struct ieee80211vap *vap = ni->ni_vap; 4441 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4442 struct wpi_node *wn = WPI_NODE(ni); 4443 int error; 4444 4445 WPI_NT_LOCK(sc); 4446 4447 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4448 4449 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4450 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4451 device_printf(sc->sc_dev, 4452 "%s: could not add IBSS node, error %d\n", 4453 __func__, error); 4454 } 4455 } 4456 WPI_NT_UNLOCK(sc); 4457 } 4458 4459 static int 4460 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4461 { 4462 struct ieee80211com *ic = vap->iv_ic; 4463 struct ieee80211_node *ni = vap->iv_bss; 4464 struct ieee80211_channel *c = ni->ni_chan; 4465 int error; 4466 4467 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4468 4469 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4470 /* Link LED blinks while monitoring. */ 4471 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4472 return 0; 4473 } 4474 4475 /* XXX kernel panic workaround */ 4476 if (c == IEEE80211_CHAN_ANYC) { 4477 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4478 __func__); 4479 return EINVAL; 4480 } 4481 4482 if ((error = wpi_set_timing(sc, ni)) != 0) { 4483 device_printf(sc->sc_dev, 4484 "%s: could not set timing, error %d\n", __func__, error); 4485 return error; 4486 } 4487 4488 /* Update adapter configuration. 
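Same RXON setup as wpi_auth(), but now the association ID is filled in and WPI_FILTER_BSS is enabled below, the two pieces wpi_auth() deliberately left out.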
*/ 4489 WPI_RXON_LOCK(sc); 4490 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4491 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4492 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4493 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4494 if (IEEE80211_IS_CHAN_2GHZ(c)) 4495 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4496 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4497 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4498 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4499 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4500 if (IEEE80211_IS_CHAN_A(c)) { 4501 sc->rxon.cck_mask = 0; 4502 sc->rxon.ofdm_mask = 0x15; 4503 } else if (IEEE80211_IS_CHAN_B(c)) { 4504 sc->rxon.cck_mask = 0x03; 4505 sc->rxon.ofdm_mask = 0; 4506 } else { 4507 /* Assume 802.11b/g. */ 4508 sc->rxon.cck_mask = 0x0f; 4509 sc->rxon.ofdm_mask = 0x15; 4510 } 4511 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4512 4513 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4514 sc->rxon.chan, sc->rxon.flags); 4515 4516 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4517 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4518 __func__); 4519 return error; 4520 } 4521 4522 /* Start periodic calibration timer. */ 4523 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4524 4525 WPI_RXON_UNLOCK(sc); 4526 4527 if (vap->iv_opmode == IEEE80211_M_IBSS || 4528 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4529 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4530 device_printf(sc->sc_dev, 4531 "%s: could not setup beacon, error %d\n", __func__, 4532 error); 4533 return error; 4534 } 4535 } 4536 4537 if (vap->iv_opmode == IEEE80211_M_STA) { 4538 /* Add BSS node. */ 4539 WPI_NT_LOCK(sc); 4540 error = wpi_add_sta_node(sc, ni); 4541 WPI_NT_UNLOCK(sc); 4542 if (error != 0) { 4543 device_printf(sc->sc_dev, 4544 "%s: could not add BSS node, error %d\n", __func__, 4545 error); 4546 return error; 4547 } 4548 } 4549 4550 /* Link LED always on while associated. */ 4551 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4552 4553 /* Enable power-saving mode if requested by user. 
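Level 3 sits between CAM (0) and the maximum power-saving level (5) documented above wpi_set_pslevel(); the command is sent asynchronously.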
*/ 4554 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4555 vap->iv_opmode != IEEE80211_M_IBSS) 4556 (void)wpi_set_pslevel(sc, 0, 3, 1); 4557 4558 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4559 4560 return 0; 4561 } 4562 4563 static int 4564 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4565 { 4566 const struct ieee80211_cipher *cip = k->wk_cipher; 4567 struct ieee80211vap *vap = ni->ni_vap; 4568 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4569 struct wpi_node *wn = WPI_NODE(ni); 4570 struct wpi_node_info node; 4571 uint16_t kflags; 4572 int error; 4573 4574 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4575 4576 if (wpi_check_node_entry(sc, wn->id) == 0) { 4577 device_printf(sc->sc_dev, "%s: node does not exist\n", 4578 __func__); 4579 return 0; 4580 } 4581 4582 switch (cip->ic_cipher) { 4583 case IEEE80211_CIPHER_AES_CCM: 4584 kflags = WPI_KFLAG_CCMP; 4585 break; 4586 4587 default: 4588 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4589 cip->ic_cipher); 4590 return 0; 4591 } 4592 4593 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4594 if (k->wk_flags & IEEE80211_KEY_GROUP) 4595 kflags |= WPI_KFLAG_MULTICAST; 4596 4597 memset(&node, 0, sizeof node); 4598 node.id = wn->id; 4599 node.control = WPI_NODE_UPDATE; 4600 node.flags = WPI_FLAG_KEY_SET; 4601 node.kflags = htole16(kflags); 4602 memcpy(node.key, k->wk_key, k->wk_keylen); 4603 again: 4604 DPRINTF(sc, WPI_DEBUG_KEY, 4605 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4606 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4607 node.id, ether_sprintf(ni->ni_macaddr)); 4608 4609 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4610 if (error != 0) { 4611 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4612 error); 4613 return !error; 4614 } 4615 4616 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4617 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4618 kflags |= WPI_KFLAG_MULTICAST; 4619 node.kflags = htole16(kflags); 4620 4621 goto again; 4622 } 4623 4624 return 1; 4625 } 4626 4627 static void 4628 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4629 { 4630 const struct ieee80211_key *k = arg; 4631 struct ieee80211vap *vap = ni->ni_vap; 4632 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4633 struct wpi_node *wn = WPI_NODE(ni); 4634 int error; 4635 4636 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4637 return; 4638 4639 WPI_NT_LOCK(sc); 4640 error = wpi_load_key(ni, k); 4641 WPI_NT_UNLOCK(sc); 4642 4643 if (error == 0) { 4644 device_printf(sc->sc_dev, "%s: error while setting key\n", 4645 __func__); 4646 } 4647 } 4648 4649 static int 4650 wpi_set_global_keys(struct ieee80211_node *ni) 4651 { 4652 struct ieee80211vap *vap = ni->ni_vap; 4653 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4654 int error = 1; 4655 4656 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4657 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4658 error = wpi_load_key(ni, wk); 4659 4660 return !error; 4661 } 4662 4663 static int 4664 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4665 { 4666 struct ieee80211vap *vap = ni->ni_vap; 4667 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4668 struct wpi_node *wn = WPI_NODE(ni); 4669 struct wpi_node_info node; 4670 uint16_t kflags; 4671 int error; 4672 4673 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4674 4675 if (wpi_check_node_entry(sc, wn->id) == 0) { 4676 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4677 return 1; /* 
Nothing to do. */ 4678 } 4679 4680 kflags = WPI_KFLAG_KID(k->wk_keyix); 4681 if (k->wk_flags & IEEE80211_KEY_GROUP) 4682 kflags |= WPI_KFLAG_MULTICAST; 4683 4684 memset(&node, 0, sizeof node); 4685 node.id = wn->id; 4686 node.control = WPI_NODE_UPDATE; 4687 node.flags = WPI_FLAG_KEY_SET; 4688 node.kflags = htole16(kflags); 4689 again: 4690 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4691 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4692 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4693 4694 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4695 if (error != 0) { 4696 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4697 error); 4698 return !error; 4699 } 4700 4701 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4702 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4703 kflags |= WPI_KFLAG_MULTICAST; 4704 node.kflags = htole16(kflags); 4705 4706 goto again; 4707 } 4708 4709 return 1; 4710 } 4711 4712 static void 4713 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4714 { 4715 const struct ieee80211_key *k = arg; 4716 struct ieee80211vap *vap = ni->ni_vap; 4717 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4718 struct wpi_node *wn = WPI_NODE(ni); 4719 int error; 4720 4721 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4722 return; 4723 4724 WPI_NT_LOCK(sc); 4725 error = wpi_del_key(ni, k); 4726 WPI_NT_UNLOCK(sc); 4727 4728 if (error == 0) { 4729 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4730 __func__); 4731 } 4732 } 4733 4734 static int 4735 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4736 int set) 4737 { 4738 struct ieee80211com *ic = vap->iv_ic; 4739 struct wpi_softc *sc = ic->ic_softc; 4740 struct wpi_vap *wvp = WPI_VAP(vap); 4741 struct ieee80211_node *ni; 4742 int error, ni_ref = 0; 4743 4744 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4745 4746 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4747 /* Not for us. */ 4748 return 1; 4749 } 4750 4751 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4752 /* XMIT keys are handled in wpi_tx_data(). */ 4753 return 1; 4754 } 4755 4756 /* Handle group keys. */ 4757 if (&vap->iv_nw_keys[0] <= k && 4758 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4759 WPI_NT_LOCK(sc); 4760 if (set) 4761 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4762 else 4763 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4764 WPI_NT_UNLOCK(sc); 4765 4766 if (vap->iv_state == IEEE80211_S_RUN) { 4767 ieee80211_iterate_nodes(&ic->ic_sta, 4768 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4769 __DECONST(void *, k)); 4770 } 4771 4772 return 1; 4773 } 4774 4775 switch (vap->iv_opmode) { 4776 case IEEE80211_M_STA: 4777 ni = vap->iv_bss; 4778 break; 4779 4780 case IEEE80211_M_IBSS: 4781 case IEEE80211_M_AHDEMO: 4782 case IEEE80211_M_HOSTAP: 4783 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4784 if (ni == NULL) 4785 return 0; /* should not happen */ 4786 4787 ni_ref = 1; 4788 break; 4789 4790 default: 4791 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4792 vap->iv_opmode); 4793 return 0; 4794 } 4795 4796 WPI_NT_LOCK(sc); 4797 if (set) 4798 error = wpi_load_key(ni, k); 4799 else 4800 error = wpi_del_key(ni, k); 4801 WPI_NT_UNLOCK(sc); 4802 4803 if (ni_ref) 4804 ieee80211_node_decref(ni); 4805 4806 return error; 4807 } 4808 4809 static int 4810 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, 4811 const uint8_t mac[IEEE80211_ADDR_LEN]) 4812 { 4813 return wpi_process_key(vap, k, 1); 4814 } 4815 4816 static int 4817 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4818 { 4819 return wpi_process_key(vap, k, 0); 4820 } 4821 4822 /* 4823 * This function is called after the runtime firmware notifies us of its 4824 * readiness (called in a process context). 4825 */ 4826 static int 4827 wpi_post_alive(struct wpi_softc *sc) 4828 { 4829 int ntries, error; 4830 4831 /* Check (again) that the radio is not disabled. */ 4832 if ((error = wpi_nic_lock(sc)) != 0) 4833 return error; 4834 4835 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4836 4837 /* NB: Runtime firmware must be up and running. */ 4838 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4839 device_printf(sc->sc_dev, 4840 "RF switch: radio disabled (%s)\n", __func__); 4841 wpi_nic_unlock(sc); 4842 return EPERM; /* :-) */ 4843 } 4844 wpi_nic_unlock(sc); 4845 4846 /* Wait for thermal sensor to calibrate. */ 4847 for (ntries = 0; ntries < 1000; ntries++) { 4848 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4849 break; 4850 DELAY(10); 4851 } 4852 4853 if (ntries == 1000) { 4854 device_printf(sc->sc_dev, 4855 "timeout waiting for thermal sensor calibration\n"); 4856 return ETIMEDOUT; 4857 } 4858 4859 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4860 return 0; 4861 } 4862 4863 /* 4864 * The firmware boot code is small and is intended to be copied directly into 4865 * the NIC internal memory (no DMA transfer). 4866 */ 4867 static int 4868 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size) 4869 { 4870 int error, ntries; 4871 4872 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4873 4874 size /= sizeof (uint32_t); 4875 4876 if ((error = wpi_nic_lock(sc)) != 0) 4877 return error; 4878 4879 /* Copy microcode image into NIC memory. */ 4880 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4881 (const uint32_t *)ucode, size); 4882 4883 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4884 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4885 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4886 4887 /* Start boot load now. */ 4888 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4889 4890 /* Wait for transfer to complete. 
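Completion shows up as the idle bit for DMA channel 6 in WPI_FH_TX_STATUS; the loop below polls it for up to 10 ms (1000 x 10 us).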
*/ 4891 for (ntries = 0; ntries < 1000; ntries++) { 4892 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4893 DPRINTF(sc, WPI_DEBUG_HW, 4894 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4895 WPI_FH_TX_STATUS_IDLE(6), 4896 status & WPI_FH_TX_STATUS_IDLE(6)); 4897 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4898 DPRINTF(sc, WPI_DEBUG_HW, 4899 "Status Match! - ntries = %d\n", ntries); 4900 break; 4901 } 4902 DELAY(10); 4903 } 4904 if (ntries == 1000) { 4905 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4906 __func__); 4907 wpi_nic_unlock(sc); 4908 return ETIMEDOUT; 4909 } 4910 4911 /* Enable boot after power up. */ 4912 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4913 4914 wpi_nic_unlock(sc); 4915 return 0; 4916 } 4917 4918 static int 4919 wpi_load_firmware(struct wpi_softc *sc) 4920 { 4921 struct wpi_fw_info *fw = &sc->fw; 4922 struct wpi_dma_info *dma = &sc->fw_dma; 4923 int error; 4924 4925 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4926 4927 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4928 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4929 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4930 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4931 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4932 4933 /* Tell adapter where to find initialization sections. */ 4934 if ((error = wpi_nic_lock(sc)) != 0) 4935 return error; 4936 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4937 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4938 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4939 dma->paddr + WPI_FW_DATA_MAXSZ); 4940 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4941 wpi_nic_unlock(sc); 4942 4943 /* Load firmware boot code. */ 4944 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4945 if (error != 0) { 4946 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4947 __func__); 4948 return error; 4949 } 4950 4951 /* Now press "execute". */ 4952 WPI_WRITE(sc, WPI_RESET, 0); 4953 4954 /* Wait at most one second for first alive notification. */ 4955 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4956 device_printf(sc->sc_dev, 4957 "%s: timeout waiting for adapter to initialize, error %d\n", 4958 __func__, error); 4959 return error; 4960 } 4961 4962 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4963 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4964 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4965 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4966 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4967 4968 /* Tell adapter where to find runtime sections. 
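Same BSM registers as for the init image, except that the text size is or'ed with WPI_FW_UPDATED, presumably telling the BSM to switch over to the runtime image.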
*/ 4969 if ((error = wpi_nic_lock(sc)) != 0) 4970 return error; 4971 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4972 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4973 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4974 dma->paddr + WPI_FW_DATA_MAXSZ); 4975 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4976 WPI_FW_UPDATED | fw->main.textsz); 4977 wpi_nic_unlock(sc); 4978 4979 return 0; 4980 } 4981 4982 static int 4983 wpi_read_firmware(struct wpi_softc *sc) 4984 { 4985 const struct firmware *fp; 4986 struct wpi_fw_info *fw = &sc->fw; 4987 const struct wpi_firmware_hdr *hdr; 4988 int error; 4989 4990 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4991 4992 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4993 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 4994 4995 WPI_UNLOCK(sc); 4996 fp = firmware_get(WPI_FW_NAME); 4997 WPI_LOCK(sc); 4998 4999 if (fp == NULL) { 5000 device_printf(sc->sc_dev, 5001 "could not load firmware image '%s'\n", WPI_FW_NAME); 5002 return EINVAL; 5003 } 5004 5005 sc->fw_fp = fp; 5006 5007 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5008 device_printf(sc->sc_dev, 5009 "firmware file too short: %zu bytes\n", fp->datasize); 5010 error = EINVAL; 5011 goto fail; 5012 } 5013 5014 fw->size = fp->datasize; 5015 fw->data = (const uint8_t *)fp->data; 5016 5017 /* Extract firmware header information. */ 5018 hdr = (const struct wpi_firmware_hdr *)fw->data; 5019 5020 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5021 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5022 5023 fw->main.textsz = le32toh(hdr->rtextsz); 5024 fw->main.datasz = le32toh(hdr->rdatasz); 5025 fw->init.textsz = le32toh(hdr->itextsz); 5026 fw->init.datasz = le32toh(hdr->idatasz); 5027 fw->boot.textsz = le32toh(hdr->btextsz); 5028 fw->boot.datasz = 0; 5029 5030 /* Sanity-check firmware header. */ 5031 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5032 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5033 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5034 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5035 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5036 (fw->boot.textsz & 3) != 0) { 5037 device_printf(sc->sc_dev, "invalid firmware header\n"); 5038 error = EINVAL; 5039 goto fail; 5040 } 5041 5042 /* Check that all firmware sections fit. */ 5043 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5044 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5045 device_printf(sc->sc_dev, 5046 "firmware file too short: %zu bytes\n", fw->size); 5047 error = EINVAL; 5048 goto fail; 5049 } 5050 5051 /* Get pointers to firmware sections. 
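The sections follow the header back to back in the order shown in the layout diagram above: runtime text and data, init text and data, then boot text.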
*/ 5052 fw->main.text = (const uint8_t *)(hdr + 1); 5053 fw->main.data = fw->main.text + fw->main.textsz; 5054 fw->init.text = fw->main.data + fw->main.datasz; 5055 fw->init.data = fw->init.text + fw->init.textsz; 5056 fw->boot.text = fw->init.data + fw->init.datasz; 5057 5058 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5059 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5060 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5061 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5062 fw->main.textsz, fw->main.datasz, 5063 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5064 5065 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5066 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5067 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5068 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5069 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5070 5071 return 0; 5072 5073 fail: wpi_unload_firmware(sc); 5074 return error; 5075 } 5076 5077 /** 5078 * Free the referenced firmware image 5079 */ 5080 static void 5081 wpi_unload_firmware(struct wpi_softc *sc) 5082 { 5083 if (sc->fw_fp != NULL) { 5084 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5085 sc->fw_fp = NULL; 5086 } 5087 } 5088 5089 static int 5090 wpi_clock_wait(struct wpi_softc *sc) 5091 { 5092 int ntries; 5093 5094 /* Set "initialization complete" bit. */ 5095 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5096 5097 /* Wait for clock stabilization. */ 5098 for (ntries = 0; ntries < 2500; ntries++) { 5099 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5100 return 0; 5101 DELAY(100); 5102 } 5103 device_printf(sc->sc_dev, 5104 "%s: timeout waiting for clock stabilization\n", __func__); 5105 5106 return ETIMEDOUT; 5107 } 5108 5109 static int 5110 wpi_apm_init(struct wpi_softc *sc) 5111 { 5112 uint32_t reg; 5113 int error; 5114 5115 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5116 5117 /* Disable L0s exit timer (NMI bug workaround). */ 5118 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5119 /* Don't wait for ICH L0s (ICH bug workaround). */ 5120 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5121 5122 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5123 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5124 5125 /* Retrieve PCIe Active State Power Management (ASPM). */ 5126 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5127 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5128 if (reg & 0x02) /* L1 Entry enabled. */ 5129 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5130 else 5131 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5132 5133 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5134 5135 /* Wait for clock stabilization before accessing prph. */ 5136 if ((error = wpi_clock_wait(sc)) != 0) 5137 return error; 5138 5139 if ((error = wpi_nic_lock(sc)) != 0) 5140 return error; 5141 /* Cleanup. */ 5142 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5143 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5144 5145 /* Enable DMA and BSM (Bootstrap State Machine). */ 5146 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5147 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5148 DELAY(20); 5149 /* Disable L1-Active. 
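Presumably part of the same PCIe ASPM workaround as the L0s/L1 handling earlier in this function.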
*/ 5150 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5151 wpi_nic_unlock(sc); 5152 5153 return 0; 5154 } 5155 5156 static void 5157 wpi_apm_stop_master(struct wpi_softc *sc) 5158 { 5159 int ntries; 5160 5161 /* Stop busmaster DMA activity. */ 5162 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5163 5164 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5165 WPI_GP_CNTRL_MAC_PS) 5166 return; /* Already asleep. */ 5167 5168 for (ntries = 0; ntries < 100; ntries++) { 5169 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5170 return; 5171 DELAY(10); 5172 } 5173 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5174 __func__); 5175 } 5176 5177 static void 5178 wpi_apm_stop(struct wpi_softc *sc) 5179 { 5180 wpi_apm_stop_master(sc); 5181 5182 /* Reset the entire device. */ 5183 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5184 DELAY(10); 5185 /* Clear "initialization complete" bit. */ 5186 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5187 } 5188 5189 static void 5190 wpi_nic_config(struct wpi_softc *sc) 5191 { 5192 uint32_t rev; 5193 5194 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5195 5196 /* voodoo from the Linux "driver".. */ 5197 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5198 if ((rev & 0xc0) == 0x40) 5199 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5200 else if (!(rev & 0x80)) 5201 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5202 5203 if (sc->cap == 0x80) 5204 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5205 5206 if ((sc->rev & 0xf0) == 0xd0) 5207 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5208 else 5209 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5210 5211 if (sc->type > 1) 5212 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5213 } 5214 5215 static int 5216 wpi_hw_init(struct wpi_softc *sc) 5217 { 5218 int chnl, ntries, error; 5219 5220 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5221 5222 /* Clear pending interrupts. */ 5223 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5224 5225 if ((error = wpi_apm_init(sc)) != 0) { 5226 device_printf(sc->sc_dev, 5227 "%s: could not power ON adapter, error %d\n", __func__, 5228 error); 5229 return error; 5230 } 5231 5232 /* Select VMAIN power source. */ 5233 if ((error = wpi_nic_lock(sc)) != 0) 5234 return error; 5235 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5236 wpi_nic_unlock(sc); 5237 /* Spin until VMAIN gets selected. */ 5238 for (ntries = 0; ntries < 5000; ntries++) { 5239 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5240 break; 5241 DELAY(10); 5242 } 5243 if (ntries == 5000) { 5244 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5245 return ETIMEDOUT; 5246 } 5247 5248 /* Perform adapter initialization. */ 5249 wpi_nic_config(sc); 5250 5251 /* Initialize RX ring. */ 5252 if ((error = wpi_nic_lock(sc)) != 0) 5253 return error; 5254 /* Set physical address of RX ring. */ 5255 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5256 /* Set physical address of RX read pointer. */ 5257 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5258 offsetof(struct wpi_shared, next)); 5259 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5260 /* Enable RX. 
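The configuration word enables the Rx DMA engine and RBD/status writeback, sets the ring size from WPI_RX_RING_COUNT_LOG and directs interrupts to the host with a short timeout.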
*/ 5261 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5262 WPI_FH_RX_CONFIG_DMA_ENA | 5263 WPI_FH_RX_CONFIG_RDRBD_ENA | 5264 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5265 WPI_FH_RX_CONFIG_MAXFRAG | 5266 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5267 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5268 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5269 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5270 wpi_nic_unlock(sc); 5271 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5272 5273 /* Initialize TX rings. */ 5274 if ((error = wpi_nic_lock(sc)) != 0) 5275 return error; 5276 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5277 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5278 /* Enable all 6 TX rings. */ 5279 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5280 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5281 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5282 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5283 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5284 /* Set physical address of TX rings. */ 5285 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5286 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5287 5288 /* Enable all DMA channels. */ 5289 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5290 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5291 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5292 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5293 } 5294 wpi_nic_unlock(sc); 5295 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5296 5297 /* Clear "radio off" and "commands blocked" bits. */ 5298 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5299 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5300 5301 /* Clear pending interrupts. */ 5302 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5303 /* Enable interrupts. */ 5304 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5305 5306 /* _Really_ make sure "radio off" bit is cleared! */ 5307 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5308 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5309 5310 if ((error = wpi_load_firmware(sc)) != 0) { 5311 device_printf(sc->sc_dev, 5312 "%s: could not load firmware, error %d\n", __func__, 5313 error); 5314 return error; 5315 } 5316 /* Wait at most one second for firmware alive notification. */ 5317 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5318 device_printf(sc->sc_dev, 5319 "%s: timeout waiting for adapter to initialize, error %d\n", 5320 __func__, error); 5321 return error; 5322 } 5323 5324 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5325 5326 /* Do post-firmware initialization. */ 5327 return wpi_post_alive(sc); 5328 } 5329 5330 static void 5331 wpi_hw_stop(struct wpi_softc *sc) 5332 { 5333 int chnl, qid, ntries; 5334 5335 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5336 5337 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5338 wpi_nic_lock(sc); 5339 5340 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5341 5342 /* Disable interrupts. */ 5343 WPI_WRITE(sc, WPI_INT_MASK, 0); 5344 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5345 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5346 5347 /* Make sure we no longer hold the NIC lock. */ 5348 wpi_nic_unlock(sc); 5349 5350 if (wpi_nic_lock(sc) == 0) { 5351 /* Stop TX scheduler. */ 5352 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5353 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5354 5355 /* Stop all DMA channels. 
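Each channel's Tx configuration is cleared and its idle bit in WPI_FH_TX_STATUS is then polled for up to 2 ms (200 x 10 us).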
*/ 5356 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5357 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5358 for (ntries = 0; ntries < 200; ntries++) { 5359 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5360 WPI_FH_TX_STATUS_IDLE(chnl)) 5361 break; 5362 DELAY(10); 5363 } 5364 } 5365 wpi_nic_unlock(sc); 5366 } 5367 5368 /* Stop RX ring. */ 5369 wpi_reset_rx_ring(sc); 5370 5371 /* Reset all TX rings. */ 5372 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 5373 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5374 5375 if (wpi_nic_lock(sc) == 0) { 5376 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5377 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5378 wpi_nic_unlock(sc); 5379 } 5380 DELAY(5); 5381 /* Power OFF adapter. */ 5382 wpi_apm_stop(sc); 5383 } 5384 5385 static void 5386 wpi_radio_on(void *arg0, int pending) 5387 { 5388 struct wpi_softc *sc = arg0; 5389 struct ieee80211com *ic = &sc->sc_ic; 5390 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5391 5392 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5393 5394 WPI_LOCK(sc); 5395 callout_stop(&sc->watchdog_rfkill); 5396 WPI_UNLOCK(sc); 5397 5398 if (vap != NULL) 5399 ieee80211_init(vap); 5400 } 5401 5402 static void 5403 wpi_radio_off(void *arg0, int pending) 5404 { 5405 struct wpi_softc *sc = arg0; 5406 struct ieee80211com *ic = &sc->sc_ic; 5407 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5408 5409 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5410 5411 ieee80211_notify_radio(ic, 0); 5412 wpi_stop(sc); 5413 if (vap != NULL) 5414 ieee80211_stop(vap); 5415 5416 WPI_LOCK(sc); 5417 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5418 WPI_UNLOCK(sc); 5419 } 5420 5421 static int 5422 wpi_init(struct wpi_softc *sc) 5423 { 5424 int error = 0; 5425 5426 WPI_LOCK(sc); 5427 5428 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5429 5430 if (sc->sc_running != 0) 5431 goto end; 5432 5433 /* Check that the radio is not disabled by hardware switch. */ 5434 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5435 device_printf(sc->sc_dev, 5436 "RF switch: radio disabled (%s)\n", __func__); 5437 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5438 sc); 5439 error = EINPROGRESS; 5440 goto end; 5441 } 5442 5443 /* Read firmware images from the filesystem. */ 5444 if ((error = wpi_read_firmware(sc)) != 0) { 5445 device_printf(sc->sc_dev, 5446 "%s: could not read firmware, error %d\n", __func__, 5447 error); 5448 goto end; 5449 } 5450 5451 sc->sc_running = 1; 5452 5453 /* Initialize hardware and upload firmware. */ 5454 error = wpi_hw_init(sc); 5455 wpi_unload_firmware(sc); 5456 if (error != 0) { 5457 device_printf(sc->sc_dev, 5458 "%s: could not initialize hardware, error %d\n", __func__, 5459 error); 5460 goto fail; 5461 } 5462 5463 /* Configure adapter now that it is ready. 
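wpi_config() sends the initial power-save (CAM), Bluetooth-coexistence, RXON and MRR commands now that the runtime firmware has reported alive.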
*/ 5464 if ((error = wpi_config(sc)) != 0) { 5465 device_printf(sc->sc_dev, 5466 "%s: could not configure device, error %d\n", __func__, 5467 error); 5468 goto fail; 5469 } 5470 5471 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5472 5473 WPI_UNLOCK(sc); 5474 5475 return 0; 5476 5477 fail: wpi_stop_locked(sc); 5478 5479 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5480 WPI_UNLOCK(sc); 5481 5482 return error; 5483 } 5484 5485 static void 5486 wpi_stop_locked(struct wpi_softc *sc) 5487 { 5488 5489 WPI_LOCK_ASSERT(sc); 5490 5491 if (sc->sc_running == 0) 5492 return; 5493 5494 WPI_TX_LOCK(sc); 5495 WPI_TXQ_LOCK(sc); 5496 sc->sc_running = 0; 5497 WPI_TXQ_UNLOCK(sc); 5498 WPI_TX_UNLOCK(sc); 5499 5500 WPI_TXQ_STATE_LOCK(sc); 5501 callout_stop(&sc->tx_timeout); 5502 WPI_TXQ_STATE_UNLOCK(sc); 5503 5504 WPI_RXON_LOCK(sc); 5505 callout_stop(&sc->scan_timeout); 5506 callout_stop(&sc->calib_to); 5507 WPI_RXON_UNLOCK(sc); 5508 5509 /* Power OFF hardware. */ 5510 wpi_hw_stop(sc); 5511 } 5512 5513 static void 5514 wpi_stop(struct wpi_softc *sc) 5515 { 5516 WPI_LOCK(sc); 5517 wpi_stop_locked(sc); 5518 WPI_UNLOCK(sc); 5519 } 5520 5521 /* 5522 * Callback from net80211 to start a scan. 5523 */ 5524 static void 5525 wpi_scan_start(struct ieee80211com *ic) 5526 { 5527 struct wpi_softc *sc = ic->ic_softc; 5528 5529 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5530 } 5531 5532 /* 5533 * Callback from net80211 to terminate a scan. 5534 */ 5535 static void 5536 wpi_scan_end(struct ieee80211com *ic) 5537 { 5538 struct wpi_softc *sc = ic->ic_softc; 5539 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5540 5541 if (vap->iv_state == IEEE80211_S_RUN) 5542 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5543 } 5544 5545 /** 5546 * Called by the net80211 framework to indicate to the driver 5547 * that the channel should be changed. 5548 */ 5549 static void 5550 wpi_set_channel(struct ieee80211com *ic) 5551 { 5552 const struct ieee80211_channel *c = ic->ic_curchan; 5553 struct wpi_softc *sc = ic->ic_softc; 5554 int error; 5555 5556 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5557 5558 WPI_LOCK(sc); 5559 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5560 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5561 WPI_UNLOCK(sc); 5562 WPI_TX_LOCK(sc); 5563 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5564 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5565 WPI_TX_UNLOCK(sc); 5566 5567 /* 5568 * Only need to set the channel in Monitor mode. AP scanning and auth 5569 * are already taken care of by their respective firmware commands. 5570 */ 5571 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5572 WPI_RXON_LOCK(sc); 5573 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5574 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5575 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5576 WPI_RXON_24GHZ); 5577 } else { 5578 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5579 WPI_RXON_24GHZ); 5580 } 5581 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5582 device_printf(sc->sc_dev, 5583 "%s: error %d setting channel\n", __func__, 5584 error); 5585 WPI_RXON_UNLOCK(sc); 5586 } 5587 } 5588 5589 /** 5590 * Called by net80211 to indicate that we need to scan the current 5591 * channel. The channel was previously set via the wpi_set_channel 5592 * callback.
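* All that is done here is to send the scan command for ic_curchan and cancel the net80211 scan if the firmware rejects it.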
5593 */ 5594 static void 5595 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5596 { 5597 struct ieee80211vap *vap = ss->ss_vap; 5598 struct ieee80211com *ic = vap->iv_ic; 5599 struct wpi_softc *sc = ic->ic_softc; 5600 int error; 5601 5602 WPI_RXON_LOCK(sc); 5603 error = wpi_scan(sc, ic->ic_curchan); 5604 WPI_RXON_UNLOCK(sc); 5605 if (error != 0) 5606 ieee80211_cancel_scan(vap); 5607 } 5608 5609 /** 5610 * Called by the net80211 framework to indicate that 5611 * the minimum dwell time has been met and the scan should be terminated. 5612 * We don't actually terminate the scan as the firmware will notify 5613 * us when it's finished and we have no way to interrupt it. 5614 */ 5615 static void 5616 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5617 { 5618 /* NB: don't try to abort scan; wait for firmware to finish */ 5619 } 5620 5621 static void 5622 wpi_hw_reset(void *arg, int pending) 5623 { 5624 struct wpi_softc *sc = arg; 5625 struct ieee80211com *ic = &sc->sc_ic; 5626 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5627 5628 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5629 5630 ieee80211_notify_radio(ic, 0); 5631 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN)) 5632 ieee80211_cancel_scan(vap); 5633 5634 wpi_stop(sc); 5635 if (vap != NULL) { 5636 ieee80211_stop(vap); 5637 ieee80211_init(vap); 5638 } 5639 } 5640