1 /*- 2 * Copyright (c) 2006,2007 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Benjamin Close <Benjamin.Close@clearchain.com> 5 * Copyright (c) 2015 Andriy Voskoboinyk <avos@FreeBSD.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <sys/cdefs.h> 21 __FBSDID("$FreeBSD$"); 22 23 /* 24 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters. 25 * 26 * The 3945ABG network adapter doesn't use traditional hardware as 27 * many other adapters do. Instead, at run time the eeprom is set into a known 28 * state and the adapter is told to load the boot firmware. The boot firmware loads an init and a 29 * main binary firmware image into SRAM on the card via DMA. 30 * Once the firmware is loaded, the driver and the hardware 31 * communicate by way of circular DMA rings located in SRAM. 32 * 33 * There are 6 memory rings: 1 command ring, 1 rx data ring and 4 tx data rings. 34 * The 4 tx data rings allow for QoS prioritization. 35 * 36 * The rx data ring consists of 32 dma buffers. Two registers are used to 37 * indicate how far into the ring the driver and the firmware have progressed. The 38 * driver sets the initial read index (reg1) and the initial write index (reg2); 39 * the firmware updates the read index (reg1) on rx of a packet and fires an 40 * interrupt. The driver then processes the buffers starting at reg1, indicating 41 * to the firmware which buffers have been consumed by updating reg2, and at the 42 * same time allocates new memory for each processed buffer. 43 * 44 * A similar thing happens with the tx rings. The difference is that the firmware 45 * stops processing buffers once the queue is full, and resumes only after confirmation 46 * of a successful transmission (tx_done) has occurred. 47 * 48 * The command ring operates in the same manner as the tx queues. 49 * 50 * All communication direct to the card (ie the eeprom) is classed as Stage1 51 * communication. 52 * 53 * All communication via the firmware to the card is classed as Stage2. 54 * The firmware consists of 2 parts: a bootstrap firmware and a runtime 55 * firmware. The bootstrap firmware and runtime firmware are loaded 56 * from host memory via dma to the card and then told to execute. From this point 57 * on the majority of communication between the driver and the card goes 58 * via the firmware.
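 *
 * As a concrete example of the write index handshake, wpi_update_rx_ring()
 * below advances the rx ring by writing the current index, rounded down to
 * a multiple of 8, into WPI_FH_RX_WPTR, while wpi_update_tx_ring() advances
 * a tx/command ring by writing (qid << 8 | cur) into WPI_HBUS_TARG_WRPTR.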
59 */ 60 61 #include "opt_wlan.h" 62 #include "opt_wpi.h" 63 64 #include <sys/param.h> 65 #include <sys/sysctl.h> 66 #include <sys/sockio.h> 67 #include <sys/mbuf.h> 68 #include <sys/kernel.h> 69 #include <sys/socket.h> 70 #include <sys/systm.h> 71 #include <sys/malloc.h> 72 #include <sys/queue.h> 73 #include <sys/taskqueue.h> 74 #include <sys/module.h> 75 #include <sys/bus.h> 76 #include <sys/endian.h> 77 #include <sys/linker.h> 78 #include <sys/firmware.h> 79 80 #include <machine/bus.h> 81 #include <machine/resource.h> 82 #include <sys/rman.h> 83 84 #include <dev/pci/pcireg.h> 85 #include <dev/pci/pcivar.h> 86 87 #include <net/bpf.h> 88 #include <net/if.h> 89 #include <net/if_var.h> 90 #include <net/if_arp.h> 91 #include <net/ethernet.h> 92 #include <net/if_dl.h> 93 #include <net/if_media.h> 94 #include <net/if_types.h> 95 96 #include <netinet/in.h> 97 #include <netinet/in_systm.h> 98 #include <netinet/in_var.h> 99 #include <netinet/if_ether.h> 100 #include <netinet/ip.h> 101 102 #include <net80211/ieee80211_var.h> 103 #include <net80211/ieee80211_radiotap.h> 104 #include <net80211/ieee80211_regdomain.h> 105 #include <net80211/ieee80211_ratectl.h> 106 107 #include <dev/wpi/if_wpireg.h> 108 #include <dev/wpi/if_wpivar.h> 109 #include <dev/wpi/if_wpi_debug.h> 110 111 struct wpi_ident { 112 uint16_t vendor; 113 uint16_t device; 114 uint16_t subdevice; 115 const char *name; 116 }; 117 118 static const struct wpi_ident wpi_ident_table[] = { 119 /* The below entries support ABG regardless of the subid */ 120 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 122 /* The below entries only support BG */ 123 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 127 { 0, 0, 0, NULL } 128 }; 129 130 static int wpi_probe(device_t); 131 static int wpi_attach(device_t); 132 static void wpi_radiotap_attach(struct wpi_softc *); 133 static void wpi_sysctlattach(struct wpi_softc *); 134 static void wpi_init_beacon(struct wpi_vap *); 135 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 136 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 137 const uint8_t [IEEE80211_ADDR_LEN], 138 const uint8_t [IEEE80211_ADDR_LEN]); 139 static void wpi_vap_delete(struct ieee80211vap *); 140 static int wpi_detach(device_t); 141 static int wpi_shutdown(device_t); 142 static int wpi_suspend(device_t); 143 static int wpi_resume(device_t); 144 static int wpi_nic_lock(struct wpi_softc *); 145 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 146 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 147 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 148 void **, bus_size_t, bus_size_t); 149 static void wpi_dma_contig_free(struct wpi_dma_info *); 150 static int wpi_alloc_shared(struct wpi_softc *); 151 static void wpi_free_shared(struct wpi_softc *); 152 static int wpi_alloc_fwmem(struct wpi_softc *); 153 static void wpi_free_fwmem(struct wpi_softc *); 154 static int wpi_alloc_rx_ring(struct wpi_softc *); 155 static void wpi_update_rx_ring(struct wpi_softc *); 156 static void wpi_update_rx_ring_ps(struct wpi_softc *); 157 static void wpi_reset_rx_ring(struct wpi_softc *); 158 static void wpi_free_rx_ring(struct wpi_softc *); 159 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 160 uint8_t); 161 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 162 static void wpi_update_tx_ring_ps(struct wpi_softc *, 163 struct wpi_tx_ring *); 164 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 166 static int wpi_read_eeprom(struct wpi_softc *, 167 uint8_t macaddr[IEEE80211_ADDR_LEN]); 168 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 169 static void wpi_read_eeprom_band(struct wpi_softc *, uint8_t); 170 static int wpi_read_eeprom_channels(struct wpi_softc *, uint8_t); 171 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 172 struct ieee80211_channel *); 173 static int wpi_setregdomain(struct ieee80211com *, 174 struct ieee80211_regdomain *, int, 175 struct ieee80211_channel[]); 176 static int wpi_read_eeprom_group(struct wpi_softc *, uint8_t); 177 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 178 const uint8_t mac[IEEE80211_ADDR_LEN]); 179 static void wpi_node_free(struct ieee80211_node *); 180 static void wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 181 const struct ieee80211_rx_stats *, 182 int, int); 183 static void wpi_restore_node(void *, struct ieee80211_node *); 184 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 185 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 186 static void wpi_calib_timeout(void *); 187 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 188 struct wpi_rx_data *); 189 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 190 struct wpi_rx_data *); 191 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 192 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 193 static void wpi_notif_intr(struct wpi_softc *); 194 static void wpi_wakeup_intr(struct wpi_softc *); 195 #ifdef WPI_DEBUG 196 static void wpi_debug_registers(struct wpi_softc *); 197 #endif 198 static void wpi_fatal_intr(struct wpi_softc *); 199 static void wpi_intr(void *); 200 static void wpi_free_txfrags(struct wpi_softc *, uint16_t); 201 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 202 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 203 struct ieee80211_node *); 204 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 205 struct ieee80211_node *, 206 const struct ieee80211_bpf_params *); 207 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 208 const struct ieee80211_bpf_params *); 209 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 210 static void wpi_watchdog_rfkill(void *); 211 static void wpi_scan_timeout(void *); 212 static void wpi_tx_timeout(void *); 213 static void wpi_parent(struct ieee80211com *); 214 static int wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t, 215 int); 216 static int wpi_mrr_setup(struct wpi_softc *); 217 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 218 static int wpi_add_broadcast_node(struct wpi_softc *, int); 219 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 220 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 221 static int wpi_updateedca(struct ieee80211com *); 222 static void wpi_set_promisc(struct wpi_softc *); 223 static void wpi_update_promisc(struct ieee80211com *); 224 static void wpi_update_mcast(struct ieee80211com *); 225 static void wpi_set_led(struct 
wpi_softc *, uint8_t, uint8_t, uint8_t); 226 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 227 static void wpi_power_calibration(struct wpi_softc *); 228 static int wpi_set_txpower(struct wpi_softc *, int); 229 static int wpi_get_power_index(struct wpi_softc *, 230 struct wpi_power_group *, uint8_t, int, int); 231 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 232 static int wpi_send_btcoex(struct wpi_softc *); 233 static int wpi_send_rxon(struct wpi_softc *, int, int); 234 static int wpi_config(struct wpi_softc *); 235 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 236 struct ieee80211_channel *, uint8_t); 237 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 238 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 239 struct ieee80211_channel *); 240 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 241 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 242 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 243 static int wpi_config_beacon(struct wpi_vap *); 244 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 245 static void wpi_update_beacon(struct ieee80211vap *, int); 246 static void wpi_newassoc(struct ieee80211_node *, int); 247 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 248 static int wpi_load_key(struct ieee80211_node *, 249 const struct ieee80211_key *); 250 static void wpi_load_key_cb(void *, struct ieee80211_node *); 251 static int wpi_set_global_keys(struct ieee80211_node *); 252 static int wpi_del_key(struct ieee80211_node *, 253 const struct ieee80211_key *); 254 static void wpi_del_key_cb(void *, struct ieee80211_node *); 255 static int wpi_process_key(struct ieee80211vap *, 256 const struct ieee80211_key *, int); 257 static int wpi_key_set(struct ieee80211vap *, 258 const struct ieee80211_key *); 259 static int wpi_key_delete(struct ieee80211vap *, 260 const struct ieee80211_key *); 261 static int wpi_post_alive(struct wpi_softc *); 262 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, 263 uint32_t); 264 static int wpi_load_firmware(struct wpi_softc *); 265 static int wpi_read_firmware(struct wpi_softc *); 266 static void wpi_unload_firmware(struct wpi_softc *); 267 static int wpi_clock_wait(struct wpi_softc *); 268 static int wpi_apm_init(struct wpi_softc *); 269 static void wpi_apm_stop_master(struct wpi_softc *); 270 static void wpi_apm_stop(struct wpi_softc *); 271 static void wpi_nic_config(struct wpi_softc *); 272 static int wpi_hw_init(struct wpi_softc *); 273 static void wpi_hw_stop(struct wpi_softc *); 274 static void wpi_radio_on(void *, int); 275 static void wpi_radio_off(void *, int); 276 static int wpi_init(struct wpi_softc *); 277 static void wpi_stop_locked(struct wpi_softc *); 278 static void wpi_stop(struct wpi_softc *); 279 static void wpi_scan_start(struct ieee80211com *); 280 static void wpi_scan_end(struct ieee80211com *); 281 static void wpi_set_channel(struct ieee80211com *); 282 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 283 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 284 static void wpi_hw_reset(void *, int); 285 286 static device_method_t wpi_methods[] = { 287 /* Device interface */ 288 DEVMETHOD(device_probe, wpi_probe), 289 DEVMETHOD(device_attach, wpi_attach), 290 DEVMETHOD(device_detach, wpi_detach), 291 DEVMETHOD(device_shutdown, wpi_shutdown), 292 DEVMETHOD(device_suspend, wpi_suspend), 293 
DEVMETHOD(device_resume, wpi_resume), 294 295 DEVMETHOD_END 296 }; 297 298 static driver_t wpi_driver = { 299 "wpi", 300 wpi_methods, 301 sizeof (struct wpi_softc) 302 }; 303 static devclass_t wpi_devclass; 304 305 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 306 307 MODULE_VERSION(wpi, 1); 308 309 MODULE_DEPEND(wpi, pci, 1, 1, 1); 310 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 311 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 312 313 static int 314 wpi_probe(device_t dev) 315 { 316 const struct wpi_ident *ident; 317 318 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 319 if (pci_get_vendor(dev) == ident->vendor && 320 pci_get_device(dev) == ident->device) { 321 device_set_desc(dev, ident->name); 322 return (BUS_PROBE_DEFAULT); 323 } 324 } 325 return ENXIO; 326 } 327 328 static int 329 wpi_attach(device_t dev) 330 { 331 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 332 struct ieee80211com *ic; 333 uint8_t i; 334 int error, rid; 335 #ifdef WPI_DEBUG 336 int supportsa = 1; 337 const struct wpi_ident *ident; 338 #endif 339 340 sc->sc_dev = dev; 341 342 #ifdef WPI_DEBUG 343 error = resource_int_value(device_get_name(sc->sc_dev), 344 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 345 if (error != 0) 346 sc->sc_debug = 0; 347 #else 348 sc->sc_debug = 0; 349 #endif 350 351 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 352 353 /* 354 * Get the offset of the PCI Express Capability Structure in PCI 355 * Configuration Space. 356 */ 357 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 358 if (error != 0) { 359 device_printf(dev, "PCIe capability structure not found!\n"); 360 return error; 361 } 362 363 /* 364 * Some card's only support 802.11b/g not a, check to see if 365 * this is one such card. A 0x0 in the subdevice table indicates 366 * the entire subdevice range is to be ignored. 367 */ 368 #ifdef WPI_DEBUG 369 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 370 if (ident->subdevice && 371 pci_get_subdevice(dev) == ident->subdevice) { 372 supportsa = 0; 373 break; 374 } 375 } 376 #endif 377 378 /* Clear device-specific "PCI retry timeout" register (41h). */ 379 pci_write_config(dev, 0x41, 0, 1); 380 381 /* Enable bus-mastering. */ 382 pci_enable_busmaster(dev); 383 384 rid = PCIR_BAR(0); 385 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 386 RF_ACTIVE); 387 if (sc->mem == NULL) { 388 device_printf(dev, "can't map mem space\n"); 389 return ENOMEM; 390 } 391 sc->sc_st = rman_get_bustag(sc->mem); 392 sc->sc_sh = rman_get_bushandle(sc->mem); 393 394 rid = 1; 395 if (pci_alloc_msi(dev, &rid) == 0) 396 rid = 1; 397 else 398 rid = 0; 399 /* Install interrupt handler. */ 400 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 401 (rid != 0 ? 0 : RF_SHAREABLE)); 402 if (sc->irq == NULL) { 403 device_printf(dev, "can't map interrupt\n"); 404 error = ENOMEM; 405 goto fail; 406 } 407 408 WPI_LOCK_INIT(sc); 409 WPI_TX_LOCK_INIT(sc); 410 WPI_RXON_LOCK_INIT(sc); 411 WPI_NT_LOCK_INIT(sc); 412 WPI_TXQ_LOCK_INIT(sc); 413 WPI_TXQ_STATE_LOCK_INIT(sc); 414 415 /* Allocate DMA memory for firmware transfers. */ 416 if ((error = wpi_alloc_fwmem(sc)) != 0) { 417 device_printf(dev, 418 "could not allocate memory for firmware, error %d\n", 419 error); 420 goto fail; 421 } 422 423 /* Allocate shared page. */ 424 if ((error = wpi_alloc_shared(sc)) != 0) { 425 device_printf(dev, "could not allocate shared page\n"); 426 goto fail; 427 } 428 429 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. 
*/ 430 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 431 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 432 device_printf(dev, 433 "could not allocate TX ring %d, error %d\n", i, 434 error); 435 goto fail; 436 } 437 } 438 439 /* Allocate RX ring. */ 440 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 441 device_printf(dev, "could not allocate RX ring, error %d\n", 442 error); 443 goto fail; 444 } 445 446 /* Clear pending interrupts. */ 447 WPI_WRITE(sc, WPI_INT, 0xffffffff); 448 449 ic = &sc->sc_ic; 450 ic->ic_softc = sc; 451 ic->ic_name = device_get_nameunit(dev); 452 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 453 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 454 455 /* Set device capabilities. */ 456 ic->ic_caps = 457 IEEE80211_C_STA /* station mode supported */ 458 | IEEE80211_C_IBSS /* IBSS mode supported */ 459 | IEEE80211_C_HOSTAP /* Host access point mode */ 460 | IEEE80211_C_MONITOR /* monitor mode supported */ 461 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 462 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 463 | IEEE80211_C_TXFRAG /* handle tx frags */ 464 | IEEE80211_C_TXPMGT /* tx power management */ 465 | IEEE80211_C_SHSLOT /* short slot time supported */ 466 | IEEE80211_C_WPA /* 802.11i */ 467 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 468 | IEEE80211_C_WME /* 802.11e */ 469 | IEEE80211_C_PMGT /* Station-side power mgmt */ 470 ; 471 472 ic->ic_cryptocaps = 473 IEEE80211_CRYPTO_AES_CCM; 474 475 /* 476 * Read in the eeprom and also setup the channels for 477 * net80211. We don't set the rates as net80211 does this for us 478 */ 479 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 480 device_printf(dev, "could not read EEPROM, error %d\n", 481 error); 482 goto fail; 483 } 484 485 #ifdef WPI_DEBUG 486 if (bootverbose) { 487 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 488 sc->domain); 489 device_printf(sc->sc_dev, "Hardware Type: %c\n", 490 sc->type > 1 ? 'B': '?'); 491 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 492 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 493 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 494 supportsa ? "does" : "does not"); 495 496 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 497 check what sc->rev really represents - benjsc 20070615 */ 498 } 499 #endif 500 501 ieee80211_ifattach(ic); 502 ic->ic_vap_create = wpi_vap_create; 503 ic->ic_vap_delete = wpi_vap_delete; 504 ic->ic_parent = wpi_parent; 505 ic->ic_raw_xmit = wpi_raw_xmit; 506 ic->ic_transmit = wpi_transmit; 507 ic->ic_node_alloc = wpi_node_alloc; 508 sc->sc_node_free = ic->ic_node_free; 509 ic->ic_node_free = wpi_node_free; 510 ic->ic_wme.wme_update = wpi_updateedca; 511 ic->ic_update_promisc = wpi_update_promisc; 512 ic->ic_update_mcast = wpi_update_mcast; 513 ic->ic_newassoc = wpi_newassoc; 514 ic->ic_scan_start = wpi_scan_start; 515 ic->ic_scan_end = wpi_scan_end; 516 ic->ic_set_channel = wpi_set_channel; 517 ic->ic_scan_curchan = wpi_scan_curchan; 518 ic->ic_scan_mindwell = wpi_scan_mindwell; 519 ic->ic_setregdomain = wpi_setregdomain; 520 521 sc->sc_update_rx_ring = wpi_update_rx_ring; 522 sc->sc_update_tx_ring = wpi_update_tx_ring; 523 524 wpi_radiotap_attach(sc); 525 526 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 527 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 528 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 529 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 530 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 531 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 532 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 533 534 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 535 taskqueue_thread_enqueue, &sc->sc_tq); 536 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 537 if (error != 0) { 538 device_printf(dev, "can't start threads, error %d\n", error); 539 goto fail; 540 } 541 542 wpi_sysctlattach(sc); 543 544 /* 545 * Hook our interrupt after all initialization is complete. 546 */ 547 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 548 NULL, wpi_intr, sc, &sc->sc_ih); 549 if (error != 0) { 550 device_printf(dev, "can't establish interrupt, error %d\n", 551 error); 552 goto fail; 553 } 554 555 if (bootverbose) 556 ieee80211_announce(ic); 557 558 #ifdef WPI_DEBUG 559 if (sc->sc_debug & WPI_DEBUG_HW) 560 ieee80211_announce_channels(ic); 561 #endif 562 563 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 564 return 0; 565 566 fail: wpi_detach(dev); 567 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 568 return error; 569 } 570 571 /* 572 * Attach the interface to 802.11 radiotap. 
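 * The rx radiotap header registered here is filled in by wpi_rx_done()
 * whenever a radiotap BPF listener is active (ieee80211_radiotap_active()).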
573 */ 574 static void 575 wpi_radiotap_attach(struct wpi_softc *sc) 576 { 577 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 578 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 579 580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 581 ieee80211_radiotap_attach(&sc->sc_ic, 582 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 583 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 585 } 586 587 static void 588 wpi_sysctlattach(struct wpi_softc *sc) 589 { 590 #ifdef WPI_DEBUG 591 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 592 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 593 594 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 595 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 596 "control debugging printfs"); 597 #endif 598 } 599 600 static void 601 wpi_init_beacon(struct wpi_vap *wvp) 602 { 603 struct wpi_buf *bcn = &wvp->wv_bcbuf; 604 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 605 606 cmd->id = WPI_ID_BROADCAST; 607 cmd->ofdm_mask = 0xff; 608 cmd->cck_mask = 0x0f; 609 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 610 611 /* 612 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 613 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 614 */ 615 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 616 617 bcn->code = WPI_CMD_SET_BEACON; 618 bcn->ac = WPI_CMD_QUEUE_NUM; 619 bcn->size = sizeof(struct wpi_cmd_beacon); 620 } 621 622 static struct ieee80211vap * 623 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 624 enum ieee80211_opmode opmode, int flags, 625 const uint8_t bssid[IEEE80211_ADDR_LEN], 626 const uint8_t mac[IEEE80211_ADDR_LEN]) 627 { 628 struct wpi_vap *wvp; 629 struct ieee80211vap *vap; 630 631 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 632 return NULL; 633 634 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 635 vap = &wvp->wv_vap; 636 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 637 638 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 639 WPI_VAP_LOCK_INIT(wvp); 640 wpi_init_beacon(wvp); 641 } 642 643 /* Override with driver methods. */ 644 vap->iv_key_set = wpi_key_set; 645 vap->iv_key_delete = wpi_key_delete; 646 if (opmode == IEEE80211_M_IBSS) { 647 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 648 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; 649 } 650 wvp->wv_newstate = vap->iv_newstate; 651 vap->iv_newstate = wpi_newstate; 652 vap->iv_update_beacon = wpi_update_beacon; 653 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 654 655 ieee80211_ratectl_init(vap); 656 /* Complete setup. 
*/ 657 ieee80211_vap_attach(vap, ieee80211_media_change, 658 ieee80211_media_status, mac); 659 ic->ic_opmode = opmode; 660 return vap; 661 } 662 663 static void 664 wpi_vap_delete(struct ieee80211vap *vap) 665 { 666 struct wpi_vap *wvp = WPI_VAP(vap); 667 struct wpi_buf *bcn = &wvp->wv_bcbuf; 668 enum ieee80211_opmode opmode = vap->iv_opmode; 669 670 ieee80211_ratectl_deinit(vap); 671 ieee80211_vap_detach(vap); 672 673 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 674 if (bcn->m != NULL) 675 m_freem(bcn->m); 676 677 WPI_VAP_LOCK_DESTROY(wvp); 678 } 679 680 free(wvp, M_80211_VAP); 681 } 682 683 static int 684 wpi_detach(device_t dev) 685 { 686 struct wpi_softc *sc = device_get_softc(dev); 687 struct ieee80211com *ic = &sc->sc_ic; 688 uint8_t qid; 689 690 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 691 692 if (ic->ic_vap_create == wpi_vap_create) { 693 ieee80211_draintask(ic, &sc->sc_radioon_task); 694 695 wpi_stop(sc); 696 697 if (sc->sc_tq != NULL) { 698 taskqueue_drain_all(sc->sc_tq); 699 taskqueue_free(sc->sc_tq); 700 } 701 702 callout_drain(&sc->watchdog_rfkill); 703 callout_drain(&sc->tx_timeout); 704 callout_drain(&sc->scan_timeout); 705 callout_drain(&sc->calib_to); 706 ieee80211_ifdetach(ic); 707 } 708 709 /* Uninstall interrupt handler. */ 710 if (sc->irq != NULL) { 711 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 712 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 713 sc->irq); 714 pci_release_msi(dev); 715 } 716 717 if (sc->txq[0].data_dmat) { 718 /* Free DMA resources. */ 719 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 720 wpi_free_tx_ring(sc, &sc->txq[qid]); 721 722 wpi_free_rx_ring(sc); 723 wpi_free_shared(sc); 724 } 725 726 if (sc->fw_dma.tag) 727 wpi_free_fwmem(sc); 728 729 if (sc->mem != NULL) 730 bus_release_resource(dev, SYS_RES_MEMORY, 731 rman_get_rid(sc->mem), sc->mem); 732 733 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 734 WPI_TXQ_STATE_LOCK_DESTROY(sc); 735 WPI_TXQ_LOCK_DESTROY(sc); 736 WPI_NT_LOCK_DESTROY(sc); 737 WPI_RXON_LOCK_DESTROY(sc); 738 WPI_TX_LOCK_DESTROY(sc); 739 WPI_LOCK_DESTROY(sc); 740 return 0; 741 } 742 743 static int 744 wpi_shutdown(device_t dev) 745 { 746 struct wpi_softc *sc = device_get_softc(dev); 747 748 wpi_stop(sc); 749 return 0; 750 } 751 752 static int 753 wpi_suspend(device_t dev) 754 { 755 struct wpi_softc *sc = device_get_softc(dev); 756 struct ieee80211com *ic = &sc->sc_ic; 757 758 ieee80211_suspend_all(ic); 759 return 0; 760 } 761 762 static int 763 wpi_resume(device_t dev) 764 { 765 struct wpi_softc *sc = device_get_softc(dev); 766 struct ieee80211com *ic = &sc->sc_ic; 767 768 /* Clear device-specific "PCI retry timeout" register (41h). */ 769 pci_write_config(dev, 0x41, 0, 1); 770 771 ieee80211_resume_all(ic); 772 return 0; 773 } 774 775 /* 776 * Grab exclusive access to NIC memory. 777 */ 778 static int 779 wpi_nic_lock(struct wpi_softc *sc) 780 { 781 int ntries; 782 783 /* Request exclusive access to NIC. */ 784 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 785 786 /* Spin until we actually get the lock. */ 787 for (ntries = 0; ntries < 1000; ntries++) { 788 if ((WPI_READ(sc, WPI_GP_CNTRL) & 789 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 790 WPI_GP_CNTRL_MAC_ACCESS_ENA) 791 return 0; 792 DELAY(10); 793 } 794 795 device_printf(sc->sc_dev, "could not lock memory\n"); 796 797 return ETIMEDOUT; 798 } 799 800 /* 801 * Release lock on NIC memory. 
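 * (i.e. clear the WPI_GP_CNTRL_MAC_ACCESS_REQ bit that wpi_nic_lock() set).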
802 */ 803 static __inline void 804 wpi_nic_unlock(struct wpi_softc *sc) 805 { 806 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 807 } 808 809 static __inline uint32_t 810 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 811 { 812 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 813 WPI_BARRIER_READ_WRITE(sc); 814 return WPI_READ(sc, WPI_PRPH_RDATA); 815 } 816 817 static __inline void 818 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 819 { 820 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 821 WPI_BARRIER_WRITE(sc); 822 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 823 } 824 825 static __inline void 826 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 827 { 828 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 829 } 830 831 static __inline void 832 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 833 { 834 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 835 } 836 837 static __inline void 838 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 839 const uint32_t *data, uint32_t count) 840 { 841 for (; count != 0; count--, data++, addr += 4) 842 wpi_prph_write(sc, addr, *data); 843 } 844 845 static __inline uint32_t 846 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 847 { 848 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 849 WPI_BARRIER_READ_WRITE(sc); 850 return WPI_READ(sc, WPI_MEM_RDATA); 851 } 852 853 static __inline void 854 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 855 int count) 856 { 857 for (; count > 0; count--, addr += 4) 858 *data++ = wpi_mem_read(sc, addr); 859 } 860 861 static int 862 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 863 { 864 uint8_t *out = data; 865 uint32_t val; 866 int error, ntries; 867 868 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 869 870 if ((error = wpi_nic_lock(sc)) != 0) 871 return error; 872 873 for (; count > 0; count -= 2, addr++) { 874 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 875 for (ntries = 0; ntries < 10; ntries++) { 876 val = WPI_READ(sc, WPI_EEPROM); 877 if (val & WPI_EEPROM_READ_VALID) 878 break; 879 DELAY(5); 880 } 881 if (ntries == 10) { 882 device_printf(sc->sc_dev, 883 "timeout reading ROM at 0x%x\n", addr); 884 return ETIMEDOUT; 885 } 886 *out++= val >> 16; 887 if (count > 1) 888 *out ++= val >> 24; 889 } 890 891 wpi_nic_unlock(sc); 892 893 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 894 895 return 0; 896 } 897 898 static void 899 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 900 { 901 if (error != 0) 902 return; 903 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 904 *(bus_addr_t *)arg = segs[0].ds_addr; 905 } 906 907 /* 908 * Allocates a contiguous block of dma memory of the requested size and 909 * alignment. 
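 * The block is mapped as a single segment below 4GB. A typical caller is
 * wpi_alloc_shared() below, which simply does
 *
 *	return wpi_dma_contig_alloc(sc, &sc->shared_dma,
 *	    (void **)&sc->shared, sizeof (struct wpi_shared), 4096);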
910 */ 911 static int 912 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 913 void **kvap, bus_size_t size, bus_size_t alignment) 914 { 915 int error; 916 917 dma->tag = NULL; 918 dma->size = size; 919 920 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 921 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 922 1, size, 0, NULL, NULL, &dma->tag); 923 if (error != 0) 924 goto fail; 925 926 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 927 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 928 if (error != 0) 929 goto fail; 930 931 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 932 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 933 if (error != 0) 934 goto fail; 935 936 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 937 938 if (kvap != NULL) 939 *kvap = dma->vaddr; 940 941 return 0; 942 943 fail: wpi_dma_contig_free(dma); 944 return error; 945 } 946 947 static void 948 wpi_dma_contig_free(struct wpi_dma_info *dma) 949 { 950 if (dma->vaddr != NULL) { 951 bus_dmamap_sync(dma->tag, dma->map, 952 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 953 bus_dmamap_unload(dma->tag, dma->map); 954 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 955 dma->vaddr = NULL; 956 } 957 if (dma->tag != NULL) { 958 bus_dma_tag_destroy(dma->tag); 959 dma->tag = NULL; 960 } 961 } 962 963 /* 964 * Allocate a shared page between host and NIC. 965 */ 966 static int 967 wpi_alloc_shared(struct wpi_softc *sc) 968 { 969 /* Shared buffer must be aligned on a 4KB boundary. */ 970 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 971 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 972 } 973 974 static void 975 wpi_free_shared(struct wpi_softc *sc) 976 { 977 wpi_dma_contig_free(&sc->shared_dma); 978 } 979 980 /* 981 * Allocate DMA-safe memory for firmware transfer. 982 */ 983 static int 984 wpi_alloc_fwmem(struct wpi_softc *sc) 985 { 986 /* Must be aligned on a 16-byte boundary. */ 987 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 988 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 989 } 990 991 static void 992 wpi_free_fwmem(struct wpi_softc *sc) 993 { 994 wpi_dma_contig_free(&sc->fw_dma); 995 } 996 997 static int 998 wpi_alloc_rx_ring(struct wpi_softc *sc) 999 { 1000 struct wpi_rx_ring *ring = &sc->rxq; 1001 bus_size_t size; 1002 int i, error; 1003 1004 ring->cur = 0; 1005 ring->update = 0; 1006 1007 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1008 1009 /* Allocate RX descriptors (16KB aligned.) */ 1010 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1011 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1012 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1013 if (error != 0) { 1014 device_printf(sc->sc_dev, 1015 "%s: could not allocate RX ring DMA memory, error %d\n", 1016 __func__, error); 1017 goto fail; 1018 } 1019 1020 /* Create RX buffer DMA tag. */ 1021 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1022 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1023 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat); 1024 if (error != 0) { 1025 device_printf(sc->sc_dev, 1026 "%s: could not create RX buf DMA tag, error %d\n", 1027 __func__, error); 1028 goto fail; 1029 } 1030 1031 /* 1032 * Allocate and map RX buffers. 
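	 * Each of the WPI_RX_RING_COUNT slots gets its own DMA map and an
	 * MJUMPAGESIZE mbuf cluster; the cluster's bus address is stored in
	 * ring->desc[i] for the firmware to use.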
1033 */ 1034 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1035 struct wpi_rx_data *data = &ring->data[i]; 1036 bus_addr_t paddr; 1037 1038 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1039 if (error != 0) { 1040 device_printf(sc->sc_dev, 1041 "%s: could not create RX buf DMA map, error %d\n", 1042 __func__, error); 1043 goto fail; 1044 } 1045 1046 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1047 if (data->m == NULL) { 1048 device_printf(sc->sc_dev, 1049 "%s: could not allocate RX mbuf\n", __func__); 1050 error = ENOBUFS; 1051 goto fail; 1052 } 1053 1054 error = bus_dmamap_load(ring->data_dmat, data->map, 1055 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1056 &paddr, BUS_DMA_NOWAIT); 1057 if (error != 0 && error != EFBIG) { 1058 device_printf(sc->sc_dev, 1059 "%s: can't map mbuf (error %d)\n", __func__, 1060 error); 1061 goto fail; 1062 } 1063 1064 /* Set physical address of RX buffer. */ 1065 ring->desc[i] = htole32(paddr); 1066 } 1067 1068 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1069 BUS_DMASYNC_PREWRITE); 1070 1071 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1072 1073 return 0; 1074 1075 fail: wpi_free_rx_ring(sc); 1076 1077 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1078 1079 return error; 1080 } 1081 1082 static void 1083 wpi_update_rx_ring(struct wpi_softc *sc) 1084 { 1085 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1086 } 1087 1088 static void 1089 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1090 { 1091 struct wpi_rx_ring *ring = &sc->rxq; 1092 1093 if (ring->update != 0) { 1094 /* Wait for INT_WAKEUP event. */ 1095 return; 1096 } 1097 1098 WPI_TXQ_LOCK(sc); 1099 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1100 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1101 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1102 __func__); 1103 ring->update = 1; 1104 } else { 1105 wpi_update_rx_ring(sc); 1106 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1107 } 1108 WPI_TXQ_UNLOCK(sc); 1109 } 1110 1111 static void 1112 wpi_reset_rx_ring(struct wpi_softc *sc) 1113 { 1114 struct wpi_rx_ring *ring = &sc->rxq; 1115 int ntries; 1116 1117 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1118 1119 if (wpi_nic_lock(sc) == 0) { 1120 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1121 for (ntries = 0; ntries < 1000; ntries++) { 1122 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1123 WPI_FH_RX_STATUS_IDLE) 1124 break; 1125 DELAY(10); 1126 } 1127 wpi_nic_unlock(sc); 1128 } 1129 1130 ring->cur = 0; 1131 ring->update = 0; 1132 } 1133 1134 static void 1135 wpi_free_rx_ring(struct wpi_softc *sc) 1136 { 1137 struct wpi_rx_ring *ring = &sc->rxq; 1138 int i; 1139 1140 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1141 1142 wpi_dma_contig_free(&ring->desc_dma); 1143 1144 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1145 struct wpi_rx_data *data = &ring->data[i]; 1146 1147 if (data->m != NULL) { 1148 bus_dmamap_sync(ring->data_dmat, data->map, 1149 BUS_DMASYNC_POSTREAD); 1150 bus_dmamap_unload(ring->data_dmat, data->map); 1151 m_freem(data->m); 1152 data->m = NULL; 1153 } 1154 if (data->map != NULL) 1155 bus_dmamap_destroy(ring->data_dmat, data->map); 1156 } 1157 if (ring->data_dmat != NULL) { 1158 bus_dma_tag_destroy(ring->data_dmat); 1159 ring->data_dmat = NULL; 1160 } 1161 } 1162 1163 static int 1164 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) 1165 { 1166 bus_addr_t paddr; 1167 bus_size_t size; 1168 int i, error; 1169 1170 ring->qid = qid; 1171 
ring->queued = 0; 1172 ring->cur = 0; 1173 ring->pending = 0; 1174 ring->update = 0; 1175 1176 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1177 1178 /* Allocate TX descriptors (16KB aligned.) */ 1179 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1180 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1181 size, WPI_RING_DMA_ALIGN); 1182 if (error != 0) { 1183 device_printf(sc->sc_dev, 1184 "%s: could not allocate TX ring DMA memory, error %d\n", 1185 __func__, error); 1186 goto fail; 1187 } 1188 1189 /* Update shared area with ring physical address. */ 1190 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1191 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1192 BUS_DMASYNC_PREWRITE); 1193 1194 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1195 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1196 size, 4); 1197 if (error != 0) { 1198 device_printf(sc->sc_dev, 1199 "%s: could not allocate TX cmd DMA memory, error %d\n", 1200 __func__, error); 1201 goto fail; 1202 } 1203 1204 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1205 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1206 WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); 1207 if (error != 0) { 1208 device_printf(sc->sc_dev, 1209 "%s: could not create TX buf DMA tag, error %d\n", 1210 __func__, error); 1211 goto fail; 1212 } 1213 1214 paddr = ring->cmd_dma.paddr; 1215 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1216 struct wpi_tx_data *data = &ring->data[i]; 1217 1218 data->cmd_paddr = paddr; 1219 paddr += sizeof (struct wpi_tx_cmd); 1220 1221 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1222 if (error != 0) { 1223 device_printf(sc->sc_dev, 1224 "%s: could not create TX buf DMA map, error %d\n", 1225 __func__, error); 1226 goto fail; 1227 } 1228 } 1229 1230 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1231 1232 return 0; 1233 1234 fail: wpi_free_tx_ring(sc, ring); 1235 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1236 return error; 1237 } 1238 1239 static void 1240 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1241 { 1242 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1243 } 1244 1245 static void 1246 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1247 { 1248 1249 if (ring->update != 0) { 1250 /* Wait for INT_WAKEUP event. */ 1251 return; 1252 } 1253 1254 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1255 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1256 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1257 __func__, ring->qid); 1258 ring->update = 1; 1259 } else { 1260 wpi_update_tx_ring(sc, ring); 1261 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1262 } 1263 } 1264 1265 static void 1266 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1267 { 1268 int i; 1269 1270 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1271 1272 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1273 struct wpi_tx_data *data = &ring->data[i]; 1274 1275 if (data->m != NULL) { 1276 bus_dmamap_sync(ring->data_dmat, data->map, 1277 BUS_DMASYNC_POSTWRITE); 1278 bus_dmamap_unload(ring->data_dmat, data->map); 1279 m_freem(data->m); 1280 data->m = NULL; 1281 } 1282 if (data->ni != NULL) { 1283 ieee80211_free_node(data->ni); 1284 data->ni = NULL; 1285 } 1286 } 1287 /* Clear TX descriptors. 
*/ 1288 memset(ring->desc, 0, ring->desc_dma.size); 1289 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1290 BUS_DMASYNC_PREWRITE); 1291 ring->queued = 0; 1292 ring->cur = 0; 1293 ring->pending = 0; 1294 ring->update = 0; 1295 } 1296 1297 static void 1298 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1299 { 1300 int i; 1301 1302 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1303 1304 wpi_dma_contig_free(&ring->desc_dma); 1305 wpi_dma_contig_free(&ring->cmd_dma); 1306 1307 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1308 struct wpi_tx_data *data = &ring->data[i]; 1309 1310 if (data->m != NULL) { 1311 bus_dmamap_sync(ring->data_dmat, data->map, 1312 BUS_DMASYNC_POSTWRITE); 1313 bus_dmamap_unload(ring->data_dmat, data->map); 1314 m_freem(data->m); 1315 } 1316 if (data->map != NULL) 1317 bus_dmamap_destroy(ring->data_dmat, data->map); 1318 } 1319 if (ring->data_dmat != NULL) { 1320 bus_dma_tag_destroy(ring->data_dmat); 1321 ring->data_dmat = NULL; 1322 } 1323 } 1324 1325 /* 1326 * Extract various information from EEPROM. 1327 */ 1328 static int 1329 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1330 { 1331 #define WPI_CHK(res) do { \ 1332 if ((error = res) != 0) \ 1333 goto fail; \ 1334 } while (0) 1335 uint8_t i; 1336 int error; 1337 1338 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1339 1340 /* Adapter has to be powered on for EEPROM access to work. */ 1341 if ((error = wpi_apm_init(sc)) != 0) { 1342 device_printf(sc->sc_dev, 1343 "%s: could not power ON adapter, error %d\n", __func__, 1344 error); 1345 return error; 1346 } 1347 1348 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1349 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1350 error = EIO; 1351 goto fail; 1352 } 1353 /* Clear HW ownership of EEPROM. */ 1354 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1355 1356 /* Read the hardware capabilities, revision and SKU type. */ 1357 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1358 sizeof(sc->cap))); 1359 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1360 sizeof(sc->rev))); 1361 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1362 sizeof(sc->type))); 1363 1364 sc->rev = le16toh(sc->rev); 1365 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1366 sc->rev, sc->type); 1367 1368 /* Read the regulatory domain (4 ASCII characters.) */ 1369 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1370 sizeof(sc->domain))); 1371 1372 /* Read MAC address. */ 1373 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1374 IEEE80211_ADDR_LEN)); 1375 1376 /* Read the list of authorized channels. */ 1377 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1378 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1379 1380 /* Read the list of TX power groups. */ 1381 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1382 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1383 1384 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1385 1386 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1387 __func__); 1388 1389 return error; 1390 #undef WPI_CHK 1391 } 1392 1393 /* 1394 * Translate EEPROM flags to net80211. 
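 * Channels without WPI_EEPROM_CHAN_ACTIVE are marked passive-scan only,
 * channels without WPI_EEPROM_CHAN_IBSS (and radar/DFS channels) are marked
 * NOADHOC, and NOADHOC in turn implies NOHOSTAP since HOSTAP mode uses
 * WPI_MODE_IBSS.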
1395 */ 1396 static uint32_t 1397 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1398 { 1399 uint32_t nflags; 1400 1401 nflags = 0; 1402 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1403 nflags |= IEEE80211_CHAN_PASSIVE; 1404 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1405 nflags |= IEEE80211_CHAN_NOADHOC; 1406 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1407 nflags |= IEEE80211_CHAN_DFS; 1408 /* XXX apparently IBSS may still be marked */ 1409 nflags |= IEEE80211_CHAN_NOADHOC; 1410 } 1411 1412 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1413 if (nflags & IEEE80211_CHAN_NOADHOC) 1414 nflags |= IEEE80211_CHAN_NOHOSTAP; 1415 1416 return nflags; 1417 } 1418 1419 static void 1420 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n) 1421 { 1422 struct ieee80211com *ic = &sc->sc_ic; 1423 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1424 const struct wpi_chan_band *band = &wpi_bands[n]; 1425 struct ieee80211_channel *c; 1426 uint32_t nflags; 1427 uint8_t chan, i; 1428 1429 for (i = 0; i < band->nchan; i++) { 1430 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1431 DPRINTF(sc, WPI_DEBUG_EEPROM, 1432 "Channel Not Valid: %d, band %d\n", 1433 band->chan[i],n); 1434 continue; 1435 } 1436 1437 chan = band->chan[i]; 1438 nflags = wpi_eeprom_channel_flags(&channels[i]); 1439 1440 c = &ic->ic_channels[ic->ic_nchans++]; 1441 c->ic_ieee = chan; 1442 c->ic_maxregpower = channels[i].maxpwr; 1443 c->ic_maxpower = 2*c->ic_maxregpower; 1444 1445 if (n == 0) { /* 2GHz band */ 1446 c->ic_freq = ieee80211_ieee2mhz(chan, 1447 IEEE80211_CHAN_G); 1448 1449 /* G =>'s B is supported */ 1450 c->ic_flags = IEEE80211_CHAN_B | nflags; 1451 c = &ic->ic_channels[ic->ic_nchans++]; 1452 c[0] = c[-1]; 1453 c->ic_flags = IEEE80211_CHAN_G | nflags; 1454 } else { /* 5GHz band */ 1455 c->ic_freq = ieee80211_ieee2mhz(chan, 1456 IEEE80211_CHAN_A); 1457 1458 c->ic_flags = IEEE80211_CHAN_A | nflags; 1459 } 1460 1461 /* Save maximum allowed TX power for this channel. */ 1462 sc->maxpwr[chan] = channels[i].maxpwr; 1463 1464 DPRINTF(sc, WPI_DEBUG_EEPROM, 1465 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1466 " offset %d\n", chan, c->ic_freq, 1467 channels[i].flags, sc->maxpwr[chan], 1468 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1469 } 1470 } 1471 1472 /** 1473 * Read the eeprom to find out what channels are valid for the given 1474 * band and update net80211 with what we find. 1475 */ 1476 static int 1477 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) 1478 { 1479 struct ieee80211com *ic = &sc->sc_ic; 1480 const struct wpi_chan_band *band = &wpi_bands[n]; 1481 int error; 1482 1483 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1484 1485 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1486 band->nchan * sizeof (struct wpi_eeprom_chan)); 1487 if (error != 0) { 1488 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1489 return error; 1490 } 1491 1492 wpi_read_eeprom_band(sc, n); 1493 1494 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1495 1496 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1497 1498 return 0; 1499 } 1500 1501 static struct wpi_eeprom_chan * 1502 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1503 { 1504 int i, j; 1505 1506 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1507 for (i = 0; i < wpi_bands[j].nchan; i++) 1508 if (wpi_bands[j].chan[i] == c->ic_ieee) 1509 return &sc->eeprom_channels[j][i]; 1510 1511 return NULL; 1512 } 1513 1514 /* 1515 * Enforce flags read from EEPROM. 
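 * Channels unknown to the EEPROM are rejected with EINVAL; for the rest the
 * EEPROM-derived restrictions are ORed into the net80211 channel flags.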
1516 */ 1517 static int 1518 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1519 int nchan, struct ieee80211_channel chans[]) 1520 { 1521 struct wpi_softc *sc = ic->ic_softc; 1522 int i; 1523 1524 for (i = 0; i < nchan; i++) { 1525 struct ieee80211_channel *c = &chans[i]; 1526 struct wpi_eeprom_chan *channel; 1527 1528 channel = wpi_find_eeprom_channel(sc, c); 1529 if (channel == NULL) { 1530 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1531 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1532 return EINVAL; 1533 } 1534 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1535 } 1536 1537 return 0; 1538 } 1539 1540 static int 1541 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) 1542 { 1543 struct wpi_power_group *group = &sc->groups[n]; 1544 struct wpi_eeprom_group rgroup; 1545 int i, error; 1546 1547 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1548 1549 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1550 &rgroup, sizeof rgroup)) != 0) { 1551 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1552 return error; 1553 } 1554 1555 /* Save TX power group information. */ 1556 group->chan = rgroup.chan; 1557 group->maxpwr = rgroup.maxpwr; 1558 /* Retrieve temperature at which the samples were taken. */ 1559 group->temp = (int16_t)le16toh(rgroup.temp); 1560 1561 DPRINTF(sc, WPI_DEBUG_EEPROM, 1562 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1563 group->maxpwr, group->temp); 1564 1565 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1566 group->samples[i].index = rgroup.samples[i].index; 1567 group->samples[i].power = rgroup.samples[i].power; 1568 1569 DPRINTF(sc, WPI_DEBUG_EEPROM, 1570 "\tsample %d: index=%d power=%d\n", i, 1571 group->samples[i].index, group->samples[i].power); 1572 } 1573 1574 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1575 1576 return 0; 1577 } 1578 1579 static __inline uint8_t 1580 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1581 { 1582 uint8_t newid = WPI_ID_IBSS_MIN; 1583 1584 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1585 if ((sc->nodesmsk & (1 << newid)) == 0) { 1586 sc->nodesmsk |= 1 << newid; 1587 return newid; 1588 } 1589 } 1590 1591 return WPI_ID_UNDEFINED; 1592 } 1593 1594 static __inline uint8_t 1595 wpi_add_node_entry_sta(struct wpi_softc *sc) 1596 { 1597 sc->nodesmsk |= 1 << WPI_ID_BSS; 1598 1599 return WPI_ID_BSS; 1600 } 1601 1602 static __inline int 1603 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1604 { 1605 if (id == WPI_ID_UNDEFINED) 1606 return 0; 1607 1608 return (sc->nodesmsk >> id) & 1; 1609 } 1610 1611 static __inline void 1612 wpi_clear_node_table(struct wpi_softc *sc) 1613 { 1614 sc->nodesmsk = 0; 1615 } 1616 1617 static __inline void 1618 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1619 { 1620 sc->nodesmsk &= ~(1 << id); 1621 } 1622 1623 static struct ieee80211_node * 1624 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1625 { 1626 struct wpi_node *wn; 1627 1628 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1629 M_NOWAIT | M_ZERO); 1630 1631 if (wn == NULL) 1632 return NULL; 1633 1634 wn->id = WPI_ID_UNDEFINED; 1635 1636 return &wn->ni; 1637 } 1638 1639 static void 1640 wpi_node_free(struct ieee80211_node *ni) 1641 { 1642 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1643 struct wpi_node *wn = WPI_NODE(ni); 1644 1645 if (wn->id != WPI_ID_UNDEFINED) { 1646 WPI_NT_LOCK(sc); 1647 if (wpi_check_node_entry(sc, wn->id)) { 1648 wpi_del_node_entry(sc, wn->id); 1649 wpi_del_node(sc, ni); 1650 } 
1651 WPI_NT_UNLOCK(sc); 1652 } 1653 1654 sc->sc_node_free(ni); 1655 } 1656 1657 static __inline int 1658 wpi_check_bss_filter(struct wpi_softc *sc) 1659 { 1660 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1661 } 1662 1663 static void 1664 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1665 const struct ieee80211_rx_stats *rxs, 1666 int rssi, int nf) 1667 { 1668 struct ieee80211vap *vap = ni->ni_vap; 1669 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1670 struct wpi_vap *wvp = WPI_VAP(vap); 1671 uint64_t ni_tstamp, rx_tstamp; 1672 1673 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1674 1675 if (vap->iv_state == IEEE80211_S_RUN && 1676 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1677 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1678 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1679 rx_tstamp = le64toh(sc->rx_tstamp); 1680 1681 if (ni_tstamp >= rx_tstamp) { 1682 DPRINTF(sc, WPI_DEBUG_STATE, 1683 "ibss merge, tsf %ju tstamp %ju\n", 1684 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1685 (void) ieee80211_ibss_merge(ni); 1686 } 1687 } 1688 } 1689 1690 static void 1691 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1692 { 1693 struct wpi_softc *sc = arg; 1694 struct wpi_node *wn = WPI_NODE(ni); 1695 int error; 1696 1697 WPI_NT_LOCK(sc); 1698 if (wn->id != WPI_ID_UNDEFINED) { 1699 wn->id = WPI_ID_UNDEFINED; 1700 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1701 device_printf(sc->sc_dev, 1702 "%s: could not add IBSS node, error %d\n", 1703 __func__, error); 1704 } 1705 } 1706 WPI_NT_UNLOCK(sc); 1707 } 1708 1709 static void 1710 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1711 { 1712 struct ieee80211com *ic = &sc->sc_ic; 1713 1714 /* Set group keys once. */ 1715 WPI_NT_LOCK(sc); 1716 wvp->wv_gtk = 0; 1717 WPI_NT_UNLOCK(sc); 1718 1719 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1720 ieee80211_crypto_reload_keys(ic); 1721 } 1722 1723 /** 1724 * Called by net80211 when ever there is a change to 80211 state machine 1725 */ 1726 static int 1727 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1728 { 1729 struct wpi_vap *wvp = WPI_VAP(vap); 1730 struct ieee80211com *ic = vap->iv_ic; 1731 struct wpi_softc *sc = ic->ic_softc; 1732 int error = 0; 1733 1734 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1735 1736 WPI_TXQ_LOCK(sc); 1737 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1738 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1739 WPI_TXQ_UNLOCK(sc); 1740 1741 return ENXIO; 1742 } 1743 WPI_TXQ_UNLOCK(sc); 1744 1745 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1746 ieee80211_state_name[vap->iv_state], 1747 ieee80211_state_name[nstate]); 1748 1749 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1750 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1751 device_printf(sc->sc_dev, 1752 "%s: could not set power saving level\n", 1753 __func__); 1754 return error; 1755 } 1756 1757 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1758 } 1759 1760 switch (nstate) { 1761 case IEEE80211_S_SCAN: 1762 WPI_RXON_LOCK(sc); 1763 if (wpi_check_bss_filter(sc) != 0) { 1764 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1765 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1766 device_printf(sc->sc_dev, 1767 "%s: could not send RXON\n", __func__); 1768 } 1769 } 1770 WPI_RXON_UNLOCK(sc); 1771 break; 1772 1773 case IEEE80211_S_ASSOC: 1774 if (vap->iv_state != IEEE80211_S_RUN) 1775 break; 1776 /* FALLTHROUGH */ 1777 case IEEE80211_S_AUTH: 1778 /* 1779 * NB: do not 
optimize AUTH -> AUTH state transmission - 1780 * this will break powersave with non-QoS AP! 1781 */ 1782 1783 /* 1784 * The node must be registered in the firmware before auth. 1785 * Also the associd must be cleared on RUN -> ASSOC 1786 * transitions. 1787 */ 1788 if ((error = wpi_auth(sc, vap)) != 0) { 1789 device_printf(sc->sc_dev, 1790 "%s: could not move to AUTH state, error %d\n", 1791 __func__, error); 1792 } 1793 break; 1794 1795 case IEEE80211_S_RUN: 1796 /* 1797 * RUN -> RUN transition: 1798 * STA mode: Just restart the timers. 1799 * IBSS mode: Process IBSS merge. 1800 */ 1801 if (vap->iv_state == IEEE80211_S_RUN) { 1802 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1803 WPI_RXON_LOCK(sc); 1804 wpi_calib_timeout(sc); 1805 WPI_RXON_UNLOCK(sc); 1806 break; 1807 } else { 1808 /* 1809 * Drop the BSS_FILTER bit 1810 * (there is no another way to change bssid). 1811 */ 1812 WPI_RXON_LOCK(sc); 1813 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1814 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1815 device_printf(sc->sc_dev, 1816 "%s: could not send RXON\n", 1817 __func__); 1818 } 1819 WPI_RXON_UNLOCK(sc); 1820 1821 /* Restore all what was lost. */ 1822 wpi_restore_node_table(sc, wvp); 1823 1824 /* XXX set conditionally? */ 1825 wpi_updateedca(ic); 1826 } 1827 } 1828 1829 /* 1830 * !RUN -> RUN requires setting the association id 1831 * which is done with a firmware cmd. We also defer 1832 * starting the timers until that work is done. 1833 */ 1834 if ((error = wpi_run(sc, vap)) != 0) { 1835 device_printf(sc->sc_dev, 1836 "%s: could not move to RUN state\n", __func__); 1837 } 1838 break; 1839 1840 default: 1841 break; 1842 } 1843 if (error != 0) { 1844 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1845 return error; 1846 } 1847 1848 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1849 1850 return wvp->wv_newstate(vap, nstate, arg); 1851 } 1852 1853 static void 1854 wpi_calib_timeout(void *arg) 1855 { 1856 struct wpi_softc *sc = arg; 1857 1858 if (wpi_check_bss_filter(sc) == 0) 1859 return; 1860 1861 wpi_power_calibration(sc); 1862 1863 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1864 } 1865 1866 static __inline uint8_t 1867 rate2plcp(const uint8_t rate) 1868 { 1869 switch (rate) { 1870 case 12: return 0xd; 1871 case 18: return 0xf; 1872 case 24: return 0x5; 1873 case 36: return 0x7; 1874 case 48: return 0x9; 1875 case 72: return 0xb; 1876 case 96: return 0x1; 1877 case 108: return 0x3; 1878 case 2: return 10; 1879 case 4: return 20; 1880 case 11: return 55; 1881 case 22: return 110; 1882 default: return 0; 1883 } 1884 } 1885 1886 static __inline uint8_t 1887 plcp2rate(const uint8_t plcp) 1888 { 1889 switch (plcp) { 1890 case 0xd: return 12; 1891 case 0xf: return 18; 1892 case 0x5: return 24; 1893 case 0x7: return 36; 1894 case 0x9: return 48; 1895 case 0xb: return 72; 1896 case 0x1: return 96; 1897 case 0x3: return 108; 1898 case 10: return 2; 1899 case 20: return 4; 1900 case 55: return 11; 1901 case 110: return 22; 1902 default: return 0; 1903 } 1904 } 1905 1906 /* Quickly determine if a given rate is CCK or OFDM. 
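 * Rates are expressed in units of 500kb/s, so the CCK (802.11b) rates are
 * 2, 4, 11 and 22; everything >= 12 except 22 is OFDM.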
*/ 1907 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1908 1909 static void 1910 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1911 struct wpi_rx_data *data) 1912 { 1913 struct ieee80211com *ic = &sc->sc_ic; 1914 struct wpi_rx_ring *ring = &sc->rxq; 1915 struct wpi_rx_stat *stat; 1916 struct wpi_rx_head *head; 1917 struct wpi_rx_tail *tail; 1918 struct ieee80211_frame *wh; 1919 struct ieee80211_node *ni; 1920 struct mbuf *m, *m1; 1921 bus_addr_t paddr; 1922 uint32_t flags; 1923 uint16_t len; 1924 int error; 1925 1926 stat = (struct wpi_rx_stat *)(desc + 1); 1927 1928 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1929 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1930 goto fail1; 1931 } 1932 1933 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1934 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1935 len = le16toh(head->len); 1936 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1937 flags = le32toh(tail->flags); 1938 1939 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1940 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1941 le32toh(desc->len), len, (int8_t)stat->rssi, 1942 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1943 1944 /* Discard frames with a bad FCS early. */ 1945 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1946 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1947 __func__, flags); 1948 goto fail1; 1949 } 1950 /* Discard frames that are too short. */ 1951 if (len < sizeof (struct ieee80211_frame_ack)) { 1952 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1953 __func__, len); 1954 goto fail1; 1955 } 1956 1957 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1958 if (__predict_false(m1 == NULL)) { 1959 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1960 __func__); 1961 goto fail1; 1962 } 1963 bus_dmamap_unload(ring->data_dmat, data->map); 1964 1965 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1966 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1967 if (__predict_false(error != 0 && error != EFBIG)) { 1968 device_printf(sc->sc_dev, 1969 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1970 m_freem(m1); 1971 1972 /* Try to reload the old mbuf. */ 1973 error = bus_dmamap_load(ring->data_dmat, data->map, 1974 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1975 &paddr, BUS_DMA_NOWAIT); 1976 if (error != 0 && error != EFBIG) { 1977 panic("%s: could not load old RX mbuf", __func__); 1978 } 1979 /* Physical address may have changed. */ 1980 ring->desc[ring->cur] = htole32(paddr); 1981 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1982 BUS_DMASYNC_PREWRITE); 1983 goto fail1; 1984 } 1985 1986 m = data->m; 1987 data->m = m1; 1988 /* Update RX descriptor. */ 1989 ring->desc[ring->cur] = htole32(paddr); 1990 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1991 BUS_DMASYNC_PREWRITE); 1992 1993 /* Finalize mbuf. */ 1994 m->m_data = (caddr_t)(head + 1); 1995 m->m_pkthdr.len = m->m_len = len; 1996 1997 /* Grab a reference to the source node. */ 1998 wh = mtod(m, struct ieee80211_frame *); 1999 2000 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2001 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2002 /* Check whether decryption was successful or not. 
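 * The firmware decrypts CCMP frames in hardware; frames that fail the
 * check below are dropped, and successfully decrypted ones are marked
 * M_WEP to note that the hardware already handled decryption.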
*/ 2003 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2004 DPRINTF(sc, WPI_DEBUG_RECV, 2005 "CCMP decryption failed 0x%x\n", flags); 2006 goto fail2; 2007 } 2008 m->m_flags |= M_WEP; 2009 } 2010 2011 if (len >= sizeof(struct ieee80211_frame_min)) 2012 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2013 else 2014 ni = NULL; 2015 2016 sc->rx_tstamp = tail->tstamp; 2017 2018 if (ieee80211_radiotap_active(ic)) { 2019 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2020 2021 tap->wr_flags = 0; 2022 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2023 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2024 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2025 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2026 tap->wr_tsft = tail->tstamp; 2027 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2028 tap->wr_rate = plcp2rate(head->plcp); 2029 } 2030 2031 WPI_UNLOCK(sc); 2032 2033 /* Send the frame to the 802.11 layer. */ 2034 if (ni != NULL) { 2035 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2036 /* Node is no longer needed. */ 2037 ieee80211_free_node(ni); 2038 } else 2039 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2040 2041 WPI_LOCK(sc); 2042 2043 return; 2044 2045 fail2: m_freem(m); 2046 2047 fail1: counter_u64_add(ic->ic_ierrors, 1); 2048 } 2049 2050 static void 2051 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2052 struct wpi_rx_data *data) 2053 { 2054 /* Ignore */ 2055 } 2056 2057 static void 2058 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2059 { 2060 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2061 struct wpi_tx_data *data = &ring->data[desc->idx]; 2062 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2063 struct mbuf *m; 2064 struct ieee80211_node *ni; 2065 struct ieee80211vap *vap; 2066 struct ieee80211com *ic; 2067 uint32_t status = le32toh(stat->status); 2068 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2069 2070 KASSERT(data->ni != NULL, ("no node")); 2071 KASSERT(data->m != NULL, ("no mbuf")); 2072 2073 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2074 2075 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2076 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2077 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2078 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2079 2080 /* Unmap and free mbuf. */ 2081 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2082 bus_dmamap_unload(ring->data_dmat, data->map); 2083 m = data->m, data->m = NULL; 2084 ni = data->ni, data->ni = NULL; 2085 vap = ni->ni_vap; 2086 ic = vap->iv_ic; 2087 2088 /* 2089 * Update rate control statistics for the node. 2090 */ 2091 if (status & WPI_TX_STATUS_FAIL) { 2092 ieee80211_ratectl_tx_complete(vap, ni, 2093 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2094 } else 2095 ieee80211_ratectl_tx_complete(vap, ni, 2096 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2097 2098 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2099 2100 WPI_TXQ_STATE_LOCK(sc); 2101 if (--ring->queued > 0) 2102 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2103 else 2104 callout_stop(&sc->tx_timeout); 2105 WPI_TXQ_STATE_UNLOCK(sc); 2106 2107 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2108 } 2109 2110 /* 2111 * Process a "command done" firmware notification. This is where we wakeup 2112 * processes waiting for a synchronous command completion. 
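 * A synchronous wpi_cmd() caller sleeps on its command slot:
 *
 *   wpi_cmd(sc, code, buf, size, 0)  ->  mtx_sleep(cmd, ...)
 *   firmware ack -> wpi_cmd_done()   ->  wakeup(cmd)
 *
 * so the wakeup() issued below is what releases the waiter.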
2113 */ 2114 static void 2115 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2116 { 2117 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2118 struct wpi_tx_data *data; 2119 struct wpi_tx_cmd *cmd; 2120 2121 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2122 "type %s len %d\n", desc->qid, desc->idx, 2123 desc->flags, wpi_cmd_str(desc->type), 2124 le32toh(desc->len)); 2125 2126 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2127 return; /* Not a command ack. */ 2128 2129 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2130 2131 data = &ring->data[desc->idx]; 2132 cmd = &ring->cmd[desc->idx]; 2133 2134 /* If the command was mapped in an mbuf, free it. */ 2135 if (data->m != NULL) { 2136 bus_dmamap_sync(ring->data_dmat, data->map, 2137 BUS_DMASYNC_POSTWRITE); 2138 bus_dmamap_unload(ring->data_dmat, data->map); 2139 m_freem(data->m); 2140 data->m = NULL; 2141 } 2142 2143 wakeup(cmd); 2144 2145 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2146 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2147 2148 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2149 BUS_DMASYNC_POSTREAD); 2150 2151 WPI_TXQ_LOCK(sc); 2152 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2153 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2154 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2155 } else { 2156 sc->sc_update_rx_ring = wpi_update_rx_ring; 2157 sc->sc_update_tx_ring = wpi_update_tx_ring; 2158 } 2159 WPI_TXQ_UNLOCK(sc); 2160 } 2161 } 2162 2163 static void 2164 wpi_notif_intr(struct wpi_softc *sc) 2165 { 2166 struct ieee80211com *ic = &sc->sc_ic; 2167 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2168 uint32_t hw; 2169 2170 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2171 BUS_DMASYNC_POSTREAD); 2172 2173 hw = le32toh(sc->shared->next) & 0xfff; 2174 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2175 2176 while (sc->rxq.cur != hw) { 2177 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2178 2179 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2180 struct wpi_rx_desc *desc; 2181 2182 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2183 BUS_DMASYNC_POSTREAD); 2184 desc = mtod(data->m, struct wpi_rx_desc *); 2185 2186 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2187 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2188 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2189 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2190 2191 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2192 /* Reply to a command. */ 2193 wpi_cmd_done(sc, desc); 2194 } 2195 2196 switch (desc->type) { 2197 case WPI_RX_DONE: 2198 /* An 802.11 frame has been received. */ 2199 wpi_rx_done(sc, desc, data); 2200 2201 if (__predict_false(sc->sc_running == 0)) { 2202 /* wpi_stop() was called. */ 2203 return; 2204 } 2205 2206 break; 2207 2208 case WPI_TX_DONE: 2209 /* An 802.11 frame has been transmitted. 
*/ 2210 wpi_tx_done(sc, desc); 2211 break; 2212 2213 case WPI_RX_STATISTICS: 2214 case WPI_BEACON_STATISTICS: 2215 wpi_rx_statistics(sc, desc, data); 2216 break; 2217 2218 case WPI_BEACON_MISSED: 2219 { 2220 struct wpi_beacon_missed *miss = 2221 (struct wpi_beacon_missed *)(desc + 1); 2222 uint32_t expected, misses, received, threshold; 2223 2224 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2225 BUS_DMASYNC_POSTREAD); 2226 2227 misses = le32toh(miss->consecutive); 2228 expected = le32toh(miss->expected); 2229 received = le32toh(miss->received); 2230 threshold = MAX(2, vap->iv_bmissthreshold); 2231 2232 DPRINTF(sc, WPI_DEBUG_BMISS, 2233 "%s: beacons missed %u(%u) (received %u/%u)\n", 2234 __func__, misses, le32toh(miss->total), received, 2235 expected); 2236 2237 if (misses >= threshold || 2238 (received == 0 && expected >= threshold)) { 2239 WPI_RXON_LOCK(sc); 2240 if (callout_pending(&sc->scan_timeout)) { 2241 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2242 0, 1); 2243 } 2244 WPI_RXON_UNLOCK(sc); 2245 if (vap->iv_state == IEEE80211_S_RUN && 2246 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2247 ieee80211_beacon_miss(ic); 2248 } 2249 2250 break; 2251 } 2252 #ifdef WPI_DEBUG 2253 case WPI_BEACON_SENT: 2254 { 2255 struct wpi_tx_stat *stat = 2256 (struct wpi_tx_stat *)(desc + 1); 2257 uint64_t *tsf = (uint64_t *)(stat + 1); 2258 uint32_t *mode = (uint32_t *)(tsf + 1); 2259 2260 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2261 BUS_DMASYNC_POSTREAD); 2262 2263 DPRINTF(sc, WPI_DEBUG_BEACON, 2264 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2265 "duration %u, status %x, tsf %ju, mode %x\n", 2266 stat->rtsfailcnt, stat->ackfailcnt, 2267 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2268 le32toh(stat->status), le64toh(*tsf), 2269 le32toh(*mode)); 2270 2271 break; 2272 } 2273 #endif 2274 case WPI_UC_READY: 2275 { 2276 struct wpi_ucode_info *uc = 2277 (struct wpi_ucode_info *)(desc + 1); 2278 2279 /* The microcontroller is ready. */ 2280 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2281 BUS_DMASYNC_POSTREAD); 2282 DPRINTF(sc, WPI_DEBUG_RESET, 2283 "microcode alive notification version=%d.%d " 2284 "subtype=%x alive=%x\n", uc->major, uc->minor, 2285 uc->subtype, le32toh(uc->valid)); 2286 2287 if (le32toh(uc->valid) != 1) { 2288 device_printf(sc->sc_dev, 2289 "microcontroller initialization failed\n"); 2290 wpi_stop_locked(sc); 2291 return; 2292 } 2293 /* Save the address of the error log in SRAM. 
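 * wpi_fatal_intr() later reads the firmware error log from this
 * offset when a SW/HW error interrupt is raised.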
*/ 2294 sc->errptr = le32toh(uc->errptr); 2295 break; 2296 } 2297 case WPI_STATE_CHANGED: 2298 { 2299 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2300 BUS_DMASYNC_POSTREAD); 2301 2302 uint32_t *status = (uint32_t *)(desc + 1); 2303 2304 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2305 le32toh(*status)); 2306 2307 if (le32toh(*status) & 1) { 2308 WPI_NT_LOCK(sc); 2309 wpi_clear_node_table(sc); 2310 WPI_NT_UNLOCK(sc); 2311 taskqueue_enqueue(sc->sc_tq, 2312 &sc->sc_radiooff_task); 2313 return; 2314 } 2315 break; 2316 } 2317 #ifdef WPI_DEBUG 2318 case WPI_START_SCAN: 2319 { 2320 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2321 BUS_DMASYNC_POSTREAD); 2322 2323 struct wpi_start_scan *scan = 2324 (struct wpi_start_scan *)(desc + 1); 2325 DPRINTF(sc, WPI_DEBUG_SCAN, 2326 "%s: scanning channel %d status %x\n", 2327 __func__, scan->chan, le32toh(scan->status)); 2328 2329 break; 2330 } 2331 #endif 2332 case WPI_STOP_SCAN: 2333 { 2334 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2335 BUS_DMASYNC_POSTREAD); 2336 2337 struct wpi_stop_scan *scan = 2338 (struct wpi_stop_scan *)(desc + 1); 2339 2340 DPRINTF(sc, WPI_DEBUG_SCAN, 2341 "scan finished nchan=%d status=%d chan=%d\n", 2342 scan->nchan, scan->status, scan->chan); 2343 2344 WPI_RXON_LOCK(sc); 2345 callout_stop(&sc->scan_timeout); 2346 WPI_RXON_UNLOCK(sc); 2347 if (scan->status == WPI_SCAN_ABORTED) 2348 ieee80211_cancel_scan(vap); 2349 else 2350 ieee80211_scan_next(vap); 2351 break; 2352 } 2353 } 2354 2355 if (sc->rxq.cur % 8 == 0) { 2356 /* Tell the firmware what we have processed. */ 2357 sc->sc_update_rx_ring(sc); 2358 } 2359 } 2360 } 2361 2362 /* 2363 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2364 * from power-down sleep mode. 2365 */ 2366 static void 2367 wpi_wakeup_intr(struct wpi_softc *sc) 2368 { 2369 int qid; 2370 2371 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2372 "%s: ucode wakeup from power-down sleep\n", __func__); 2373 2374 /* Wakeup RX and TX rings. 
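 * Flush any ring index updates that were deferred (via the ->update
 * flags) while the microcontroller was in power-down sleep.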
*/ 2375 if (sc->rxq.update) { 2376 sc->rxq.update = 0; 2377 wpi_update_rx_ring(sc); 2378 } 2379 WPI_TXQ_LOCK(sc); 2380 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2381 struct wpi_tx_ring *ring = &sc->txq[qid]; 2382 2383 if (ring->update) { 2384 ring->update = 0; 2385 wpi_update_tx_ring(sc, ring); 2386 } 2387 } 2388 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2389 WPI_TXQ_UNLOCK(sc); 2390 } 2391 2392 /* 2393 * This function prints firmware registers 2394 */ 2395 #ifdef WPI_DEBUG 2396 static void 2397 wpi_debug_registers(struct wpi_softc *sc) 2398 { 2399 size_t i; 2400 static const uint32_t csr_tbl[] = { 2401 WPI_HW_IF_CONFIG, 2402 WPI_INT, 2403 WPI_INT_MASK, 2404 WPI_FH_INT, 2405 WPI_GPIO_IN, 2406 WPI_RESET, 2407 WPI_GP_CNTRL, 2408 WPI_EEPROM, 2409 WPI_EEPROM_GP, 2410 WPI_GIO, 2411 WPI_UCODE_GP1, 2412 WPI_UCODE_GP2, 2413 WPI_GIO_CHICKEN, 2414 WPI_ANA_PLL, 2415 WPI_DBG_HPET_MEM, 2416 }; 2417 static const uint32_t prph_tbl[] = { 2418 WPI_APMG_CLK_CTRL, 2419 WPI_APMG_PS, 2420 WPI_APMG_PCI_STT, 2421 WPI_APMG_RFKILL, 2422 }; 2423 2424 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2425 2426 for (i = 0; i < nitems(csr_tbl); i++) { 2427 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2428 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2429 2430 if ((i + 1) % 2 == 0) 2431 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2432 } 2433 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2434 2435 if (wpi_nic_lock(sc) == 0) { 2436 for (i = 0; i < nitems(prph_tbl); i++) { 2437 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2438 wpi_get_prph_string(prph_tbl[i]), 2439 wpi_prph_read(sc, prph_tbl[i])); 2440 2441 if ((i + 1) % 2 == 0) 2442 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2443 } 2444 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2445 wpi_nic_unlock(sc); 2446 } else { 2447 DPRINTF(sc, WPI_DEBUG_REGISTER, 2448 "Cannot access internal registers.\n"); 2449 } 2450 } 2451 #endif 2452 2453 /* 2454 * Dump the error log of the firmware when a firmware panic occurs. Although 2455 * we can't debug the firmware because it is neither open source nor free, it 2456 * can help us to identify certain classes of problems. 2457 */ 2458 static void 2459 wpi_fatal_intr(struct wpi_softc *sc) 2460 { 2461 struct wpi_fw_dump dump; 2462 uint32_t i, offset, count; 2463 2464 /* Check that the error log address is valid. */ 2465 if (sc->errptr < WPI_FW_DATA_BASE || 2466 sc->errptr + sizeof (dump) > 2467 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2468 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2469 sc->errptr); 2470 return; 2471 } 2472 if (wpi_nic_lock(sc) != 0) { 2473 printf("%s: could not read firmware error log\n", __func__); 2474 return; 2475 } 2476 /* Read number of entries in the log. */ 2477 count = wpi_mem_read(sc, sc->errptr); 2478 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2479 printf("%s: invalid count field (count = %u)\n", __func__, 2480 count); 2481 wpi_nic_unlock(sc); 2482 return; 2483 } 2484 /* Skip "count" field. */ 2485 offset = sc->errptr + sizeof (uint32_t); 2486 printf("firmware error log (count = %u):\n", count); 2487 for (i = 0; i < count; i++) { 2488 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2489 sizeof (dump) / sizeof (uint32_t)); 2490 2491 printf(" error type = \"%s\" (0x%08X)\n", 2492 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2493 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2494 dump.desc); 2495 printf(" error data = 0x%08X\n", 2496 dump.data); 2497 printf(" branch link = 0x%08X%08X\n", 2498 dump.blink[0], dump.blink[1]); 2499 printf(" interrupt link = 0x%08X%08X\n", 2500 dump.ilink[0], dump.ilink[1]); 2501 printf(" time = %u\n", dump.time); 2502 2503 offset += sizeof (dump); 2504 } 2505 wpi_nic_unlock(sc); 2506 /* Dump driver status (TX and RX rings) while we're here. */ 2507 printf("driver status:\n"); 2508 WPI_TXQ_LOCK(sc); 2509 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2510 struct wpi_tx_ring *ring = &sc->txq[i]; 2511 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2512 i, ring->qid, ring->cur, ring->queued); 2513 } 2514 WPI_TXQ_UNLOCK(sc); 2515 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2516 } 2517 2518 static void 2519 wpi_intr(void *arg) 2520 { 2521 struct wpi_softc *sc = arg; 2522 uint32_t r1, r2; 2523 2524 WPI_LOCK(sc); 2525 2526 /* Disable interrupts. */ 2527 WPI_WRITE(sc, WPI_INT_MASK, 0); 2528 2529 r1 = WPI_READ(sc, WPI_INT); 2530 2531 if (__predict_false(r1 == 0xffffffff || 2532 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2533 goto end; /* Hardware gone! */ 2534 2535 r2 = WPI_READ(sc, WPI_FH_INT); 2536 2537 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2538 r1, r2); 2539 2540 if (r1 == 0 && r2 == 0) 2541 goto done; /* Interrupt not for us. */ 2542 2543 /* Acknowledge interrupts. */ 2544 WPI_WRITE(sc, WPI_INT, r1); 2545 WPI_WRITE(sc, WPI_FH_INT, r2); 2546 2547 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2548 device_printf(sc->sc_dev, "fatal firmware error\n"); 2549 #ifdef WPI_DEBUG 2550 wpi_debug_registers(sc); 2551 #endif 2552 wpi_fatal_intr(sc); 2553 DPRINTF(sc, WPI_DEBUG_HW, 2554 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2555 "(Hardware Error)"); 2556 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2557 goto end; 2558 } 2559 2560 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2561 (r2 & WPI_FH_INT_RX)) 2562 wpi_notif_intr(sc); 2563 2564 if (r1 & WPI_INT_ALIVE) 2565 wakeup(sc); /* Firmware is alive. */ 2566 2567 if (r1 & WPI_INT_WAKEUP) 2568 wpi_wakeup_intr(sc); 2569 2570 done: 2571 /* Re-enable interrupts. 
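 * Interrupts stay masked if wpi_stop() has run (sc_running == 0) or
 * after a fatal firmware error, until the reinit task restarts the
 * device.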
*/ 2572 if (__predict_true(sc->sc_running)) 2573 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2574 2575 end: WPI_UNLOCK(sc); 2576 } 2577 2578 static void 2579 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) 2580 { 2581 struct wpi_tx_ring *ring; 2582 struct wpi_tx_data *data; 2583 uint8_t cur; 2584 2585 WPI_TXQ_LOCK(sc); 2586 ring = &sc->txq[ac]; 2587 2588 while (ring->pending != 0) { 2589 ring->pending--; 2590 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2591 data = &ring->data[cur]; 2592 2593 bus_dmamap_sync(ring->data_dmat, data->map, 2594 BUS_DMASYNC_POSTWRITE); 2595 bus_dmamap_unload(ring->data_dmat, data->map); 2596 m_freem(data->m); 2597 data->m = NULL; 2598 2599 ieee80211_node_decref(data->ni); 2600 data->ni = NULL; 2601 } 2602 2603 WPI_TXQ_UNLOCK(sc); 2604 } 2605 2606 static int 2607 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2608 { 2609 struct ieee80211_frame *wh; 2610 struct wpi_tx_cmd *cmd; 2611 struct wpi_tx_data *data; 2612 struct wpi_tx_desc *desc; 2613 struct wpi_tx_ring *ring; 2614 struct mbuf *m1; 2615 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2616 uint8_t cur, pad; 2617 uint16_t hdrlen; 2618 int error, i, nsegs, totlen, frag; 2619 2620 WPI_TXQ_LOCK(sc); 2621 2622 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2623 2624 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2625 2626 if (__predict_false(sc->sc_running == 0)) { 2627 /* wpi_stop() was called */ 2628 error = ENETDOWN; 2629 goto end; 2630 } 2631 2632 wh = mtod(buf->m, struct ieee80211_frame *); 2633 hdrlen = ieee80211_anyhdrsize(wh); 2634 totlen = buf->m->m_pkthdr.len; 2635 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); 2636 2637 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2638 error = EINVAL; 2639 goto end; 2640 } 2641 2642 if (hdrlen & 3) { 2643 /* First segment length must be a multiple of 4. */ 2644 pad = 4 - (hdrlen & 3); 2645 } else 2646 pad = 0; 2647 2648 ring = &sc->txq[buf->ac]; 2649 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2650 desc = &ring->desc[cur]; 2651 data = &ring->data[cur]; 2652 2653 /* Prepare TX firmware command. */ 2654 cmd = &ring->cmd[cur]; 2655 cmd->code = buf->code; 2656 cmd->flags = 0; 2657 cmd->qid = ring->qid; 2658 cmd->idx = cur; 2659 2660 memcpy(cmd->data, buf->data, buf->size); 2661 2662 /* Save and trim IEEE802.11 header. */ 2663 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2664 m_adj(buf->m, hdrlen); 2665 2666 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2667 segs, &nsegs, BUS_DMA_NOWAIT); 2668 if (error != 0 && error != EFBIG) { 2669 device_printf(sc->sc_dev, 2670 "%s: can't map mbuf (error %d)\n", __func__, error); 2671 goto end; 2672 } 2673 if (error != 0) { 2674 /* Too many DMA segments, linearize mbuf. */ 2675 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2676 if (m1 == NULL) { 2677 device_printf(sc->sc_dev, 2678 "%s: could not defrag mbuf\n", __func__); 2679 error = ENOBUFS; 2680 goto end; 2681 } 2682 buf->m = m1; 2683 2684 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2685 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2686 if (__predict_false(error != 0)) { 2687 /* XXX fix this (applicable to the iwn(4) too) */ 2688 /* 2689 * NB: Do not return error; 2690 * original mbuf does not exist anymore. 
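 * m_collapse() replaced the chain, so the caller's mbuf pointer is
 * stale and would be freed a second time if we reported failure here;
 * drop the frame ourselves (counting an output error for data queues)
 * and return 0.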
2691 */ 2692 device_printf(sc->sc_dev, 2693 "%s: can't map mbuf (error %d)\n", __func__, 2694 error); 2695 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2696 if_inc_counter(buf->ni->ni_vap->iv_ifp, 2697 IFCOUNTER_OERRORS, 1); 2698 if (!frag) 2699 ieee80211_free_node(buf->ni); 2700 } 2701 m_freem(buf->m); 2702 error = 0; 2703 goto end; 2704 } 2705 } 2706 2707 KASSERT(nsegs < WPI_MAX_SCATTER, 2708 ("too many DMA segments, nsegs (%d) should be less than %d", 2709 nsegs, WPI_MAX_SCATTER)); 2710 2711 data->m = buf->m; 2712 data->ni = buf->ni; 2713 2714 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2715 __func__, ring->qid, cur, totlen, nsegs); 2716 2717 /* Fill TX descriptor. */ 2718 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2719 /* First DMA segment is used by the TX command. */ 2720 desc->segs[0].addr = htole32(data->cmd_paddr); 2721 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2722 /* Other DMA segments are for data payload. */ 2723 seg = &segs[0]; 2724 for (i = 1; i <= nsegs; i++) { 2725 desc->segs[i].addr = htole32(seg->ds_addr); 2726 desc->segs[i].len = htole32(seg->ds_len); 2727 seg++; 2728 } 2729 2730 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2731 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2732 BUS_DMASYNC_PREWRITE); 2733 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2734 BUS_DMASYNC_PREWRITE); 2735 2736 ring->pending += 1; 2737 2738 if (!frag) { 2739 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2740 WPI_TXQ_STATE_LOCK(sc); 2741 ring->queued += ring->pending; 2742 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, 2743 sc); 2744 WPI_TXQ_STATE_UNLOCK(sc); 2745 } 2746 2747 /* Kick TX ring. */ 2748 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2749 ring->pending = 0; 2750 sc->sc_update_tx_ring(sc, ring); 2751 } else 2752 ieee80211_node_incref(data->ni); 2753 2754 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 2755 __func__); 2756 2757 WPI_TXQ_UNLOCK(sc); 2758 2759 return (error); 2760 } 2761 2762 /* 2763 * Construct the data packet for a transmit buffer. 2764 */ 2765 static int 2766 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2767 { 2768 const struct ieee80211_txparam *tp; 2769 struct ieee80211vap *vap = ni->ni_vap; 2770 struct ieee80211com *ic = ni->ni_ic; 2771 struct wpi_node *wn = WPI_NODE(ni); 2772 struct ieee80211_channel *chan; 2773 struct ieee80211_frame *wh; 2774 struct ieee80211_key *k = NULL; 2775 struct wpi_buf tx_data; 2776 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2777 uint32_t flags; 2778 uint16_t ac, qos; 2779 uint8_t tid, type, rate; 2780 int swcrypt, ismcast, totlen; 2781 2782 wh = mtod(m, struct ieee80211_frame *); 2783 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2784 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2785 swcrypt = 1; 2786 2787 /* Select EDCA Access Category and TX ring for this frame. */ 2788 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2789 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2790 tid = qos & IEEE80211_QOS_TID; 2791 } else { 2792 qos = 0; 2793 tid = 0; 2794 } 2795 ac = M_WME_GETAC(m); 2796 2797 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2798 ni->ni_chan : ic->ic_curchan; 2799 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2800 2801 /* Choose a TX rate index. 
*/ 2802 if (type == IEEE80211_FC0_TYPE_MGT) 2803 rate = tp->mgmtrate; 2804 else if (ismcast) 2805 rate = tp->mcastrate; 2806 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2807 rate = tp->ucastrate; 2808 else if (m->m_flags & M_EAPOL) 2809 rate = tp->mgmtrate; 2810 else { 2811 /* XXX pass pktlen */ 2812 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2813 rate = ni->ni_txrate; 2814 } 2815 2816 /* Encrypt the frame if need be. */ 2817 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2818 /* Retrieve key for TX. */ 2819 k = ieee80211_crypto_encap(ni, m); 2820 if (k == NULL) 2821 return (ENOBUFS); 2822 2823 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2824 2825 /* 802.11 header may have moved. */ 2826 wh = mtod(m, struct ieee80211_frame *); 2827 } 2828 totlen = m->m_pkthdr.len; 2829 2830 if (ieee80211_radiotap_active_vap(vap)) { 2831 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2832 2833 tap->wt_flags = 0; 2834 tap->wt_rate = rate; 2835 if (k != NULL) 2836 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2837 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2838 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2839 2840 ieee80211_radiotap_tx(vap, m); 2841 } 2842 2843 flags = 0; 2844 if (!ismcast) { 2845 /* Unicast frame, check if an ACK is expected. */ 2846 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2847 IEEE80211_QOS_ACKPOLICY_NOACK) 2848 flags |= WPI_TX_NEED_ACK; 2849 } 2850 2851 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2852 flags |= WPI_TX_AUTO_SEQ; 2853 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2854 flags |= WPI_TX_MORE_FRAG; 2855 2856 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2857 if (!ismcast) { 2858 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2859 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2860 flags |= WPI_TX_NEED_RTS; 2861 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2862 WPI_RATE_IS_OFDM(rate)) { 2863 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2864 flags |= WPI_TX_NEED_CTS; 2865 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2866 flags |= WPI_TX_NEED_RTS; 2867 } 2868 2869 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2870 flags |= WPI_TX_FULL_TXOP; 2871 } 2872 2873 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2874 if (type == IEEE80211_FC0_TYPE_MGT) { 2875 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2876 2877 /* Tell HW to set timestamp in probe responses. 
*/ 2878 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2879 flags |= WPI_TX_INSERT_TSTAMP; 2880 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2881 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2882 tx->timeout = htole16(3); 2883 else 2884 tx->timeout = htole16(2); 2885 } 2886 2887 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2888 tx->id = WPI_ID_BROADCAST; 2889 else { 2890 if (wn->id == WPI_ID_UNDEFINED) { 2891 device_printf(sc->sc_dev, 2892 "%s: undefined node id\n", __func__); 2893 return (EINVAL); 2894 } 2895 2896 tx->id = wn->id; 2897 } 2898 2899 if (!swcrypt) { 2900 switch (k->wk_cipher->ic_cipher) { 2901 case IEEE80211_CIPHER_AES_CCM: 2902 tx->security = WPI_CIPHER_CCMP; 2903 break; 2904 2905 default: 2906 break; 2907 } 2908 2909 memcpy(tx->key, k->wk_key, k->wk_keylen); 2910 } 2911 2912 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 2913 struct mbuf *next = m->m_nextpkt; 2914 2915 tx->lnext = htole16(next->m_pkthdr.len); 2916 tx->fnext = htole32(tx->security | 2917 (flags & WPI_TX_NEED_ACK) | 2918 WPI_NEXT_STA_ID(tx->id)); 2919 } 2920 2921 tx->len = htole16(totlen); 2922 tx->flags = htole32(flags); 2923 tx->plcp = rate2plcp(rate); 2924 tx->tid = tid; 2925 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2926 tx->ofdm_mask = 0xff; 2927 tx->cck_mask = 0x0f; 2928 tx->rts_ntries = 7; 2929 tx->data_ntries = tp->maxretry; 2930 2931 tx_data.ni = ni; 2932 tx_data.m = m; 2933 tx_data.size = sizeof(struct wpi_cmd_data); 2934 tx_data.code = WPI_CMD_TX_DATA; 2935 tx_data.ac = ac; 2936 2937 return wpi_cmd2(sc, &tx_data); 2938 } 2939 2940 static int 2941 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2942 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2943 { 2944 struct ieee80211vap *vap = ni->ni_vap; 2945 struct ieee80211_key *k = NULL; 2946 struct ieee80211_frame *wh; 2947 struct wpi_buf tx_data; 2948 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2949 uint32_t flags; 2950 uint8_t ac, type, rate; 2951 int swcrypt, totlen; 2952 2953 wh = mtod(m, struct ieee80211_frame *); 2954 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2955 swcrypt = 1; 2956 2957 ac = params->ibp_pri & 3; 2958 2959 /* Choose a TX rate index. */ 2960 rate = params->ibp_rate0; 2961 2962 flags = 0; 2963 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2964 flags |= WPI_TX_AUTO_SEQ; 2965 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2966 flags |= WPI_TX_NEED_ACK; 2967 if (params->ibp_flags & IEEE80211_BPF_RTS) 2968 flags |= WPI_TX_NEED_RTS; 2969 if (params->ibp_flags & IEEE80211_BPF_CTS) 2970 flags |= WPI_TX_NEED_CTS; 2971 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2972 flags |= WPI_TX_FULL_TXOP; 2973 2974 /* Encrypt the frame if need be. */ 2975 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2976 /* Retrieve key for TX. */ 2977 k = ieee80211_crypto_encap(ni, m); 2978 if (k == NULL) 2979 return (ENOBUFS); 2980 2981 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2982 2983 /* 802.11 header may have moved. 
*/ 2984 wh = mtod(m, struct ieee80211_frame *); 2985 } 2986 totlen = m->m_pkthdr.len; 2987 2988 if (ieee80211_radiotap_active_vap(vap)) { 2989 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2990 2991 tap->wt_flags = 0; 2992 tap->wt_rate = rate; 2993 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2994 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2995 2996 ieee80211_radiotap_tx(vap, m); 2997 } 2998 2999 memset(tx, 0, sizeof (struct wpi_cmd_data)); 3000 if (type == IEEE80211_FC0_TYPE_MGT) { 3001 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3002 3003 /* Tell HW to set timestamp in probe responses. */ 3004 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3005 flags |= WPI_TX_INSERT_TSTAMP; 3006 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3007 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3008 tx->timeout = htole16(3); 3009 else 3010 tx->timeout = htole16(2); 3011 } 3012 3013 if (!swcrypt) { 3014 switch (k->wk_cipher->ic_cipher) { 3015 case IEEE80211_CIPHER_AES_CCM: 3016 tx->security = WPI_CIPHER_CCMP; 3017 break; 3018 3019 default: 3020 break; 3021 } 3022 3023 memcpy(tx->key, k->wk_key, k->wk_keylen); 3024 } 3025 3026 tx->len = htole16(totlen); 3027 tx->flags = htole32(flags); 3028 tx->plcp = rate2plcp(rate); 3029 tx->id = WPI_ID_BROADCAST; 3030 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3031 tx->rts_ntries = params->ibp_try1; 3032 tx->data_ntries = params->ibp_try0; 3033 3034 tx_data.ni = ni; 3035 tx_data.m = m; 3036 tx_data.size = sizeof(struct wpi_cmd_data); 3037 tx_data.code = WPI_CMD_TX_DATA; 3038 tx_data.ac = ac; 3039 3040 return wpi_cmd2(sc, &tx_data); 3041 } 3042 3043 static __inline int 3044 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) 3045 { 3046 struct wpi_tx_ring *ring = &sc->txq[ac]; 3047 int retval; 3048 3049 WPI_TXQ_STATE_LOCK(sc); 3050 retval = WPI_TX_RING_HIMARK - ring->queued; 3051 WPI_TXQ_STATE_UNLOCK(sc); 3052 3053 return retval; 3054 } 3055 3056 static int 3057 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3058 const struct ieee80211_bpf_params *params) 3059 { 3060 struct ieee80211com *ic = ni->ni_ic; 3061 struct wpi_softc *sc = ic->ic_softc; 3062 uint16_t ac; 3063 int error = 0; 3064 3065 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3066 3067 ac = M_WME_GETAC(m); 3068 3069 WPI_TX_LOCK(sc); 3070 3071 /* NB: no fragments here */ 3072 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { 3073 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3074 goto unlock; 3075 } 3076 3077 if (params == NULL) { 3078 /* 3079 * Legacy path; interpret frame contents to decide 3080 * precisely how to send the frame. 3081 */ 3082 error = wpi_tx_data(sc, m, ni); 3083 } else { 3084 /* 3085 * Caller supplied explicit parameters to use in 3086 * sending the frame. 3087 */ 3088 error = wpi_tx_data_raw(sc, m, ni, params); 3089 } 3090 3091 unlock: WPI_TX_UNLOCK(sc); 3092 3093 if (error != 0) { 3094 m_freem(m); 3095 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3096 3097 return error; 3098 } 3099 3100 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3101 3102 return 0; 3103 } 3104 3105 static int 3106 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3107 { 3108 struct wpi_softc *sc = ic->ic_softc; 3109 struct ieee80211_node *ni; 3110 struct mbuf *mnext; 3111 uint16_t ac; 3112 int error, nmbufs; 3113 3114 WPI_TX_LOCK(sc); 3115 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3116 3117 /* Check if interface is up & running. 
*/ 3118 if (__predict_false(sc->sc_running == 0)) { 3119 error = ENXIO; 3120 goto unlock; 3121 } 3122 3123 nmbufs = 1; 3124 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) 3125 nmbufs++; 3126 3127 /* Check for available space. */ 3128 ac = M_WME_GETAC(m); 3129 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { 3130 error = ENOBUFS; 3131 goto unlock; 3132 } 3133 3134 error = 0; 3135 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3136 do { 3137 mnext = m->m_nextpkt; 3138 if (wpi_tx_data(sc, m, ni) != 0) { 3139 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 3140 nmbufs); 3141 wpi_free_txfrags(sc, ac); 3142 ieee80211_free_mbuf(m); 3143 ieee80211_free_node(ni); 3144 break; 3145 } 3146 } while((m = mnext) != NULL); 3147 3148 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3149 3150 unlock: WPI_TX_UNLOCK(sc); 3151 3152 return (error); 3153 } 3154 3155 static void 3156 wpi_watchdog_rfkill(void *arg) 3157 { 3158 struct wpi_softc *sc = arg; 3159 struct ieee80211com *ic = &sc->sc_ic; 3160 3161 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3162 3163 /* No need to lock firmware memory. */ 3164 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3165 /* Radio kill switch is still off. */ 3166 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3167 sc); 3168 } else 3169 ieee80211_runtask(ic, &sc->sc_radioon_task); 3170 } 3171 3172 static void 3173 wpi_scan_timeout(void *arg) 3174 { 3175 struct wpi_softc *sc = arg; 3176 struct ieee80211com *ic = &sc->sc_ic; 3177 3178 ic_printf(ic, "scan timeout\n"); 3179 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3180 } 3181 3182 static void 3183 wpi_tx_timeout(void *arg) 3184 { 3185 struct wpi_softc *sc = arg; 3186 struct ieee80211com *ic = &sc->sc_ic; 3187 3188 ic_printf(ic, "device timeout\n"); 3189 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3190 } 3191 3192 static void 3193 wpi_parent(struct ieee80211com *ic) 3194 { 3195 struct wpi_softc *sc = ic->ic_softc; 3196 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3197 3198 if (ic->ic_nrunning > 0) { 3199 if (wpi_init(sc) == 0) { 3200 ieee80211_notify_radio(ic, 1); 3201 ieee80211_start_all(ic); 3202 } else { 3203 ieee80211_notify_radio(ic, 0); 3204 ieee80211_stop(vap); 3205 } 3206 } else 3207 wpi_stop(sc); 3208 } 3209 3210 /* 3211 * Send a command to the firmware. 3212 */ 3213 static int 3214 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, 3215 int async) 3216 { 3217 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3218 struct wpi_tx_desc *desc; 3219 struct wpi_tx_data *data; 3220 struct wpi_tx_cmd *cmd; 3221 struct mbuf *m; 3222 bus_addr_t paddr; 3223 uint16_t totlen; 3224 int error; 3225 3226 WPI_TXQ_LOCK(sc); 3227 3228 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3229 3230 if (__predict_false(sc->sc_running == 0)) { 3231 /* wpi_stop() was called */ 3232 if (code == WPI_CMD_SCAN) 3233 error = ENETDOWN; 3234 else 3235 error = 0; 3236 3237 goto fail; 3238 } 3239 3240 if (async == 0) 3241 WPI_LOCK_ASSERT(sc); 3242 3243 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", 3244 __func__, wpi_cmd_str(code), size, async); 3245 3246 desc = &ring->desc[ring->cur]; 3247 data = &ring->data[ring->cur]; 3248 totlen = 4 + size; 3249 3250 if (size > sizeof cmd->data) { 3251 /* Command is too large to fit in a descriptor. 
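 * Commands that fit in the preallocated wpi_tx_cmd slot are copied
 * into ring->cmd[]; larger payloads (e.g. the scan request) are staged
 * in a page-sized mbuf cluster and DMA-mapped on the fly, with
 * MCLBYTES as the upper bound.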
*/ 3252 if (totlen > MCLBYTES) { 3253 error = EINVAL; 3254 goto fail; 3255 } 3256 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3257 if (m == NULL) { 3258 error = ENOMEM; 3259 goto fail; 3260 } 3261 cmd = mtod(m, struct wpi_tx_cmd *); 3262 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3263 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3264 if (error != 0) { 3265 m_freem(m); 3266 goto fail; 3267 } 3268 data->m = m; 3269 } else { 3270 cmd = &ring->cmd[ring->cur]; 3271 paddr = data->cmd_paddr; 3272 } 3273 3274 cmd->code = code; 3275 cmd->flags = 0; 3276 cmd->qid = ring->qid; 3277 cmd->idx = ring->cur; 3278 memcpy(cmd->data, buf, size); 3279 3280 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3281 desc->segs[0].addr = htole32(paddr); 3282 desc->segs[0].len = htole32(totlen); 3283 3284 if (size > sizeof cmd->data) { 3285 bus_dmamap_sync(ring->data_dmat, data->map, 3286 BUS_DMASYNC_PREWRITE); 3287 } else { 3288 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3289 BUS_DMASYNC_PREWRITE); 3290 } 3291 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3292 BUS_DMASYNC_PREWRITE); 3293 3294 /* Kick command ring. */ 3295 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3296 sc->sc_update_tx_ring(sc, ring); 3297 3298 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3299 3300 WPI_TXQ_UNLOCK(sc); 3301 3302 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3303 3304 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3305 3306 WPI_TXQ_UNLOCK(sc); 3307 3308 return error; 3309 } 3310 3311 /* 3312 * Configure HW multi-rate retries. 3313 */ 3314 static int 3315 wpi_mrr_setup(struct wpi_softc *sc) 3316 { 3317 struct ieee80211com *ic = &sc->sc_ic; 3318 struct wpi_mrr_setup mrr; 3319 uint8_t i; 3320 int error; 3321 3322 /* CCK rates (not used with 802.11a). */ 3323 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3324 mrr.rates[i].flags = 0; 3325 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3326 /* Fallback to the immediate lower CCK rate (if any.) */ 3327 mrr.rates[i].next = 3328 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3329 /* Try twice at this rate before falling back to "next". */ 3330 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3331 } 3332 /* OFDM rates (not used with 802.11b). */ 3333 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3334 mrr.rates[i].flags = 0; 3335 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3336 /* Fallback to the immediate lower rate (if any.) */ 3337 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3338 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3339 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3340 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3341 i - 1; 3342 /* Try twice at this rate before falling back to "next". */ 3343 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3344 } 3345 /* Setup MRR for control frames. */ 3346 mrr.which = htole32(WPI_MRR_CTL); 3347 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3348 if (error != 0) { 3349 device_printf(sc->sc_dev, 3350 "could not setup MRR for control frames\n"); 3351 return error; 3352 } 3353 /* Setup MRR for data frames. 
*/ 3354 mrr.which = htole32(WPI_MRR_DATA); 3355 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3356 if (error != 0) { 3357 device_printf(sc->sc_dev, 3358 "could not setup MRR for data frames\n"); 3359 return error; 3360 } 3361 return 0; 3362 } 3363 3364 static int 3365 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3366 { 3367 struct ieee80211com *ic = ni->ni_ic; 3368 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3369 struct wpi_node *wn = WPI_NODE(ni); 3370 struct wpi_node_info node; 3371 int error; 3372 3373 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3374 3375 if (wn->id == WPI_ID_UNDEFINED) 3376 return EINVAL; 3377 3378 memset(&node, 0, sizeof node); 3379 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3380 node.id = wn->id; 3381 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3382 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3383 node.action = htole32(WPI_ACTION_SET_RATE); 3384 node.antenna = WPI_ANTENNA_BOTH; 3385 3386 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3387 wn->id, ether_sprintf(ni->ni_macaddr)); 3388 3389 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3390 if (error != 0) { 3391 device_printf(sc->sc_dev, 3392 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3393 error); 3394 return error; 3395 } 3396 3397 if (wvp->wv_gtk != 0) { 3398 error = wpi_set_global_keys(ni); 3399 if (error != 0) { 3400 device_printf(sc->sc_dev, 3401 "%s: error while setting global keys\n", __func__); 3402 return ENXIO; 3403 } 3404 } 3405 3406 return 0; 3407 } 3408 3409 /* 3410 * Broadcast node is used to send group-addressed and management frames. 3411 */ 3412 static int 3413 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3414 { 3415 struct ieee80211com *ic = &sc->sc_ic; 3416 struct wpi_node_info node; 3417 3418 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3419 3420 memset(&node, 0, sizeof node); 3421 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3422 node.id = WPI_ID_BROADCAST; 3423 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3424 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3425 node.action = htole32(WPI_ACTION_SET_RATE); 3426 node.antenna = WPI_ANTENNA_BOTH; 3427 3428 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3429 3430 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3431 } 3432 3433 static int 3434 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3435 { 3436 struct wpi_node *wn = WPI_NODE(ni); 3437 int error; 3438 3439 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3440 3441 wn->id = wpi_add_node_entry_sta(sc); 3442 3443 if ((error = wpi_add_node(sc, ni)) != 0) { 3444 wpi_del_node_entry(sc, wn->id); 3445 wn->id = WPI_ID_UNDEFINED; 3446 return error; 3447 } 3448 3449 return 0; 3450 } 3451 3452 static int 3453 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3454 { 3455 struct wpi_node *wn = WPI_NODE(ni); 3456 int error; 3457 3458 KASSERT(wn->id == WPI_ID_UNDEFINED, 3459 ("the node %d was added before", wn->id)); 3460 3461 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3462 3463 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3464 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3465 return ENOMEM; 3466 } 3467 3468 if ((error = wpi_add_node(sc, ni)) != 0) { 3469 wpi_del_node_entry(sc, wn->id); 3470 wn->id = WPI_ID_UNDEFINED; 3471 return error; 3472 } 3473 3474 return 0; 3475 } 3476 3477 static void 3478 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3479 { 3480 struct wpi_node *wn = WPI_NODE(ni); 3481 struct wpi_cmd_del_node node; 3482 int error; 3483 3484 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3485 3486 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3487 3488 memset(&node, 0, sizeof node); 3489 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3490 node.count = 1; 3491 3492 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3493 wn->id, ether_sprintf(ni->ni_macaddr)); 3494 3495 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3496 if (error != 0) { 3497 device_printf(sc->sc_dev, 3498 "%s: could not delete node %u, error %d\n", __func__, 3499 wn->id, error); 3500 } 3501 } 3502 3503 static int 3504 wpi_updateedca(struct ieee80211com *ic) 3505 { 3506 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3507 struct wpi_softc *sc = ic->ic_softc; 3508 struct wpi_edca_params cmd; 3509 int aci, error; 3510 3511 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3512 3513 memset(&cmd, 0, sizeof cmd); 3514 cmd.flags = htole32(WPI_EDCA_UPDATE); 3515 for (aci = 0; aci < WME_NUM_AC; aci++) { 3516 const struct wmeParams *ac = 3517 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3518 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3519 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3520 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3521 cmd.ac[aci].txoplimit = 3522 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3523 3524 DPRINTF(sc, WPI_DEBUG_EDCA, 3525 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3526 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3527 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3528 cmd.ac[aci].txoplimit); 3529 } 3530 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3531 3532 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3533 3534 return error; 3535 #undef WPI_EXP2 3536 } 3537 3538 static void 3539 wpi_set_promisc(struct wpi_softc *sc) 3540 { 3541 struct ieee80211com *ic = &sc->sc_ic; 3542 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3543 uint32_t promisc_filter; 3544 3545 promisc_filter = WPI_FILTER_CTL; 3546 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3547 promisc_filter |= WPI_FILTER_PROMISC; 3548 3549 if (ic->ic_promisc > 0) 3550 sc->rxon.filter |= htole32(promisc_filter); 3551 else 3552 sc->rxon.filter &= ~htole32(promisc_filter); 3553 } 3554 3555 static void 3556 wpi_update_promisc(struct ieee80211com *ic) 3557 { 3558 struct wpi_softc *sc = ic->ic_softc; 3559 3560 WPI_LOCK(sc); 3561 if (sc->sc_running == 0) { 3562 WPI_UNLOCK(sc); 3563 return; 3564 } 3565 WPI_UNLOCK(sc); 3566 3567 WPI_RXON_LOCK(sc); 3568 wpi_set_promisc(sc); 3569 3570 if (wpi_send_rxon(sc, 1, 1) != 0) { 3571 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3572 __func__); 3573 } 3574 WPI_RXON_UNLOCK(sc); 3575 } 3576 3577 static void 3578 wpi_update_mcast(struct ieee80211com *ic) 3579 { 3580 /* Ignore */ 3581 } 3582 3583 static void 3584 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3585 { 3586 struct wpi_cmd_led led; 3587 3588 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3589 3590 led.which = which; 3591 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3592 led.off = off; 3593 led.on = on; 3594 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3595 } 3596 3597 static int 3598 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3599 { 3600 struct wpi_cmd_timing cmd; 3601 uint64_t val, mod; 3602 3603 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3604 3605 memset(&cmd, 0, sizeof cmd); 3606 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3607 cmd.bintval = htole16(ni->ni_intval); 3608 cmd.lintval = htole16(10); 3609 3610 /* Compute remaining time until next beacon. */ 3611 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3612 mod = le64toh(cmd.tstamp) % val; 3613 cmd.binitval = htole32((uint32_t)(val - mod)); 3614 3615 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3616 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3617 3618 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3619 } 3620 3621 /* 3622 * This function is called periodically (every 60 seconds) to adjust output 3623 * power to temperature changes. 3624 */ 3625 static void 3626 wpi_power_calibration(struct wpi_softc *sc) 3627 { 3628 int temp; 3629 3630 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3631 3632 /* Update sensor data. */ 3633 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3634 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3635 3636 /* Sanity-check read value. */ 3637 if (temp < -260 || temp > 25) { 3638 /* This can't be correct, ignore. */ 3639 DPRINTF(sc, WPI_DEBUG_TEMP, 3640 "out-of-range temperature reported: %d\n", temp); 3641 return; 3642 } 3643 3644 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3645 3646 /* Adjust Tx power if need be. */ 3647 if (abs(temp - sc->temp) <= 6) 3648 return; 3649 3650 sc->temp = temp; 3651 3652 if (wpi_set_txpower(sc, 1) != 0) { 3653 /* just warn, too bad for the automatic calibration... */ 3654 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3655 } 3656 } 3657 3658 /* 3659 * Set TX power for current channel. 3660 */ 3661 static int 3662 wpi_set_txpower(struct wpi_softc *sc, int async) 3663 { 3664 struct wpi_power_group *group; 3665 struct wpi_cmd_txpower cmd; 3666 uint8_t chan; 3667 int idx, is_chan_5ghz, i; 3668 3669 /* Retrieve current channel from last RXON. 
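 * Power group 0 covers the 2GHz band; groups 1-3 cover the 5GHz
 * sub-bands and are selected below by the highest channel each group
 * covers.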
*/ 3670 chan = sc->rxon.chan; 3671 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3672 3673 /* Find the TX power group to which this channel belongs. */ 3674 if (is_chan_5ghz) { 3675 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3676 if (chan <= group->chan) 3677 break; 3678 } else 3679 group = &sc->groups[0]; 3680 3681 memset(&cmd, 0, sizeof cmd); 3682 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3683 cmd.chan = htole16(chan); 3684 3685 /* Set TX power for all OFDM and CCK rates. */ 3686 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3687 /* Retrieve TX power for this channel/rate. */ 3688 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3689 3690 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3691 3692 if (is_chan_5ghz) { 3693 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3694 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3695 } else { 3696 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3697 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3698 } 3699 DPRINTF(sc, WPI_DEBUG_TEMP, 3700 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3701 } 3702 3703 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3704 } 3705 3706 /* 3707 * Determine Tx power index for a given channel/rate combination. 3708 * This takes into account the regulatory information from EEPROM and the 3709 * current temperature. 3710 */ 3711 static int 3712 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3713 uint8_t chan, int is_chan_5ghz, int ridx) 3714 { 3715 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3716 #define fdivround(a, b, n) \ 3717 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3718 3719 /* Linear interpolation. */ 3720 #define interpolate(x, x1, y1, x2, y2, n) \ 3721 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3722 3723 struct wpi_power_sample *sample; 3724 int pwr, idx; 3725 3726 /* Default TX power is group maximum TX power minus 3dB. */ 3727 pwr = group->maxpwr / 2; 3728 3729 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3730 switch (ridx) { 3731 case WPI_RIDX_OFDM36: 3732 pwr -= is_chan_5ghz ? 5 : 0; 3733 break; 3734 case WPI_RIDX_OFDM48: 3735 pwr -= is_chan_5ghz ? 10 : 7; 3736 break; 3737 case WPI_RIDX_OFDM54: 3738 pwr -= is_chan_5ghz ? 12 : 9; 3739 break; 3740 } 3741 3742 /* Never exceed the channel maximum allowed TX power. */ 3743 pwr = min(pwr, sc->maxpwr[chan]); 3744 3745 /* Retrieve TX power index into gain tables from samples. */ 3746 for (sample = group->samples; sample < &group->samples[3]; sample++) 3747 if (pwr > sample[1].power) 3748 break; 3749 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3750 idx = interpolate(pwr, sample[0].power, sample[0].index, 3751 sample[1].power, sample[1].index, 19); 3752 3753 /*- 3754 * Adjust power index based on current temperature: 3755 * - if cooler than factory-calibrated: decrease output power 3756 * - if warmer than factory-calibrated: increase output power 3757 */ 3758 idx -= (sc->temp - group->temp) * 11 / 100; 3759 3760 /* Decrease TX power for CCK rates (-5dB). */ 3761 if (ridx >= WPI_RIDX_CCK1) 3762 idx += 10; 3763 3764 /* Make sure idx stays in a valid range. */ 3765 if (idx < 0) 3766 return 0; 3767 if (idx > WPI_MAX_PWR_INDEX) 3768 return WPI_MAX_PWR_INDEX; 3769 return idx; 3770 3771 #undef interpolate 3772 #undef fdivround 3773 } 3774 3775 /* 3776 * Set STA mode power saving level (between 0 and 5). 
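 * The level indexes the wpi_pmgt parameter tables; which table is
 * consulted depends on whether the DTIM period is above 10.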
3777 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3778 */ 3779 static int 3780 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3781 { 3782 struct wpi_pmgt_cmd cmd; 3783 const struct wpi_pmgt *pmgt; 3784 uint32_t max, reg; 3785 uint8_t skip_dtim; 3786 int i; 3787 3788 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3789 "%s: dtim=%d, level=%d, async=%d\n", 3790 __func__, dtim, level, async); 3791 3792 /* Select which PS parameters to use. */ 3793 if (dtim <= 10) 3794 pmgt = &wpi_pmgt[0][level]; 3795 else 3796 pmgt = &wpi_pmgt[1][level]; 3797 3798 memset(&cmd, 0, sizeof cmd); 3799 if (level != 0) /* not CAM */ 3800 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3801 /* Retrieve PCIe Active State Power Management (ASPM). */ 3802 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 3803 if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */ 3804 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3805 3806 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3807 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3808 3809 if (dtim == 0) { 3810 dtim = 1; 3811 skip_dtim = 0; 3812 } else 3813 skip_dtim = pmgt->skip_dtim; 3814 3815 if (skip_dtim != 0) { 3816 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3817 max = pmgt->intval[4]; 3818 if (max == (uint32_t)-1) 3819 max = dtim * (skip_dtim + 1); 3820 else if (max > dtim) 3821 max = (max / dtim) * dtim; 3822 } else 3823 max = dtim; 3824 3825 for (i = 0; i < 5; i++) 3826 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3827 3828 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3829 } 3830 3831 static int 3832 wpi_send_btcoex(struct wpi_softc *sc) 3833 { 3834 struct wpi_bluetooth cmd; 3835 3836 memset(&cmd, 0, sizeof cmd); 3837 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3838 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3839 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3840 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3841 __func__); 3842 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3843 } 3844 3845 static int 3846 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3847 { 3848 int error; 3849 3850 if (async) 3851 WPI_RXON_LOCK_ASSERT(sc); 3852 3853 if (assoc && wpi_check_bss_filter(sc) != 0) { 3854 struct wpi_assoc rxon_assoc; 3855 3856 rxon_assoc.flags = sc->rxon.flags; 3857 rxon_assoc.filter = sc->rxon.filter; 3858 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3859 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3860 rxon_assoc.reserved = 0; 3861 3862 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3863 sizeof (struct wpi_assoc), async); 3864 if (error != 0) { 3865 device_printf(sc->sc_dev, 3866 "RXON_ASSOC command failed, error %d\n", error); 3867 return error; 3868 } 3869 } else { 3870 if (async) { 3871 WPI_NT_LOCK(sc); 3872 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3873 sizeof (struct wpi_rxon), async); 3874 if (error == 0) 3875 wpi_clear_node_table(sc); 3876 WPI_NT_UNLOCK(sc); 3877 } else { 3878 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3879 sizeof (struct wpi_rxon), async); 3880 if (error == 0) 3881 wpi_clear_node_table(sc); 3882 } 3883 3884 if (error != 0) { 3885 device_printf(sc->sc_dev, 3886 "RXON command failed, error %d\n", error); 3887 return error; 3888 } 3889 3890 /* Add broadcast node. 
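 * A full RXON command clears the firmware node table (see above), so
 * the broadcast entry used for group-addressed and management frames
 * has to be installed again.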
*/
3891 error = wpi_add_broadcast_node(sc, async);
3892 if (error != 0) {
3893 device_printf(sc->sc_dev,
3894 "could not add broadcast node, error %d\n", error);
3895 return error;
3896 }
3897 }
3898
3899 /* Configuration has changed, set Tx power accordingly. */
3900 if ((error = wpi_set_txpower(sc, async)) != 0) {
3901 device_printf(sc->sc_dev,
3902 "%s: could not set TX power, error %d\n", __func__, error);
3903 return error;
3904 }
3905
3906 return 0;
3907 }
3908
3909 /**
3910 * Configure the card to listen to a particular channel; this transitions the
3911 * card into being able to receive frames from remote devices.
3912 */
3913 static int
3914 wpi_config(struct wpi_softc *sc)
3915 {
3916 struct ieee80211com *ic = &sc->sc_ic;
3917 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3918 struct ieee80211_channel *c = ic->ic_curchan;
3919 int error;
3920
3921 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3922
3923 /* Set power saving level to CAM during initialization. */
3924 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
3925 device_printf(sc->sc_dev,
3926 "%s: could not set power saving level\n", __func__);
3927 return error;
3928 }
3929
3930 /* Configure bluetooth coexistence. */
3931 if ((error = wpi_send_btcoex(sc)) != 0) {
3932 device_printf(sc->sc_dev,
3933 "could not configure bluetooth coexistence\n");
3934 return error;
3935 }
3936
3937 /* Configure adapter. */
3938 memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
3939 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
3940
3941 /* Set default channel. */
3942 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
3943 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
3944 if (IEEE80211_IS_CHAN_2GHZ(c))
3945 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
3946
3947 sc->rxon.filter = WPI_FILTER_MULTICAST;
3948 switch (ic->ic_opmode) {
3949 case IEEE80211_M_STA:
3950 sc->rxon.mode = WPI_MODE_STA;
3951 break;
3952 case IEEE80211_M_IBSS:
3953 sc->rxon.mode = WPI_MODE_IBSS;
3954 sc->rxon.filter |= WPI_FILTER_BEACON;
3955 break;
3956 case IEEE80211_M_HOSTAP:
3957 /* XXX workaround for beaconing */
3958 sc->rxon.mode = WPI_MODE_IBSS;
3959 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
3960 break;
3961 case IEEE80211_M_AHDEMO:
3962 sc->rxon.mode = WPI_MODE_HOSTAP;
3963 break;
3964 case IEEE80211_M_MONITOR:
3965 sc->rxon.mode = WPI_MODE_MONITOR;
3966 break;
3967 default:
3968 device_printf(sc->sc_dev, "unknown opmode %d\n",
3969 ic->ic_opmode);
3970 return EINVAL;
3971 }
3972 sc->rxon.filter = htole32(sc->rxon.filter);
3973 wpi_set_promisc(sc);
3974 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
3975 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
3976
3977 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
3978 device_printf(sc->sc_dev, "%s: could not send RXON\n",
3979 __func__);
3980 return error;
3981 }
3982
3983 /* Set up rate scaling. */
3984 if ((error = wpi_mrr_setup(sc)) != 0) {
3985 device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
3986 error);
3987 return error;
3988 }
3989
3990 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3991
3992 return 0;
3993 }
3994
3995 static uint16_t
3996 wpi_get_active_dwell_time(struct wpi_softc *sc,
3997 struct ieee80211_channel *c, uint8_t n_probes)
3998 {
3999 /* No channel? Default to 2GHz settings. */
4000 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
4001 return (WPI_ACTIVE_DWELL_TIME_2GHZ +
4002 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
4003 }
4004
4005 /* 5GHz dwell time.
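 * Computed like the 2GHz case above: a base dwell plus a per-probe factor,
 * i.e. WPI_ACTIVE_DWELL_TIME_5GHZ + WPI_ACTIVE_DWELL_FACTOR_5GHZ *
 * (n_probes + 1); e.g. probing two SSIDs multiplies the factor by 3.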
*/ 4006 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4007 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4008 } 4009 4010 /* 4011 * Limit the total dwell time. 4012 * 4013 * Returns the dwell time in milliseconds. 4014 */ 4015 static uint16_t 4016 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4017 { 4018 struct ieee80211com *ic = &sc->sc_ic; 4019 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4020 uint16_t bintval = 0; 4021 4022 /* bintval is in TU (1.024mS) */ 4023 if (vap != NULL) 4024 bintval = vap->iv_bss->ni_intval; 4025 4026 /* 4027 * If it's non-zero, we should calculate the minimum of 4028 * it and the DWELL_BASE. 4029 * 4030 * XXX Yes, the math should take into account that bintval 4031 * is 1.024mS, not 1mS.. 4032 */ 4033 if (bintval > 0) { 4034 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4035 bintval); 4036 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4037 } 4038 4039 /* No association context? Default. */ 4040 return dwell_time; 4041 } 4042 4043 static uint16_t 4044 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4045 { 4046 uint16_t passive; 4047 4048 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4049 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4050 else 4051 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4052 4053 /* Clamp to the beacon interval if we're associated. */ 4054 return (wpi_limit_dwell(sc, passive)); 4055 } 4056 4057 static uint32_t 4058 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4059 { 4060 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4061 uint32_t nbeacons = time / bintval; 4062 4063 if (mod > WPI_PAUSE_MAX_TIME) 4064 mod = WPI_PAUSE_MAX_TIME; 4065 4066 return WPI_PAUSE_SCAN(nbeacons, mod); 4067 } 4068 4069 /* 4070 * Send a scan request to the firmware. 4071 */ 4072 static int 4073 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4074 { 4075 struct ieee80211com *ic = &sc->sc_ic; 4076 struct ieee80211_scan_state *ss = ic->ic_scan; 4077 struct ieee80211vap *vap = ss->ss_vap; 4078 struct wpi_scan_hdr *hdr; 4079 struct wpi_cmd_data *tx; 4080 struct wpi_scan_essid *essids; 4081 struct wpi_scan_chan *chan; 4082 struct ieee80211_frame *wh; 4083 struct ieee80211_rateset *rs; 4084 uint16_t bintval, buflen, dwell_active, dwell_passive; 4085 uint8_t *buf, *frm, i, nssid; 4086 int bgscan, error; 4087 4088 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4089 4090 /* 4091 * We are absolutely not allowed to send a scan command when another 4092 * scan command is pending. 4093 */ 4094 if (callout_pending(&sc->scan_timeout)) { 4095 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4096 __func__); 4097 error = EAGAIN; 4098 goto fail; 4099 } 4100 4101 bgscan = wpi_check_bss_filter(sc); 4102 bintval = vap->iv_bss->ni_intval; 4103 if (bgscan != 0 && 4104 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4105 error = EOPNOTSUPP; 4106 goto fail; 4107 } 4108 4109 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4110 if (buf == NULL) { 4111 device_printf(sc->sc_dev, 4112 "%s: could not allocate buffer for scan command\n", 4113 __func__); 4114 error = ENOMEM; 4115 goto fail; 4116 } 4117 hdr = (struct wpi_scan_hdr *)buf; 4118 4119 /* 4120 * Move to the next channel if no packets are received within 10 msecs 4121 * after sending the probe request. 
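 * quiet_time below holds that listen period; quiet_threshold is presumably
 * the minimum number of frames that must be heard for the channel to be
 * treated as busy.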
4122 */ 4123 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4124 hdr->quiet_threshold = htole16(1); 4125 4126 if (bgscan != 0) { 4127 /* 4128 * Max needs to be greater than active and passive and quiet! 4129 * It's also in microseconds! 4130 */ 4131 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4132 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4133 bintval)); 4134 } 4135 4136 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4137 4138 tx = (struct wpi_cmd_data *)(hdr + 1); 4139 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4140 tx->id = WPI_ID_BROADCAST; 4141 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4142 4143 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4144 /* Send probe requests at 6Mbps. */ 4145 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4146 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4147 } else { 4148 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4149 /* Send probe requests at 1Mbps. */ 4150 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4151 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4152 } 4153 4154 essids = (struct wpi_scan_essid *)(tx + 1); 4155 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4156 for (i = 0; i < nssid; i++) { 4157 essids[i].id = IEEE80211_ELEMID_SSID; 4158 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4159 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4160 #ifdef WPI_DEBUG 4161 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4162 printf("Scanning Essid: "); 4163 ieee80211_print_essid(essids[i].data, essids[i].len); 4164 printf("\n"); 4165 } 4166 #endif 4167 } 4168 4169 /* 4170 * Build a probe request frame. Most of the following code is a 4171 * copy & paste of what is done in net80211. 4172 */ 4173 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4174 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4175 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4176 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4177 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4178 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4179 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4180 4181 frm = (uint8_t *)(wh + 1); 4182 frm = ieee80211_add_ssid(frm, NULL, 0); 4183 frm = ieee80211_add_rates(frm, rs); 4184 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4185 frm = ieee80211_add_xrates(frm, rs); 4186 4187 /* Set length of probe request. */ 4188 tx->len = htole16(frm - (uint8_t *)wh); 4189 4190 /* 4191 * Construct information about the channel that we 4192 * want to scan. The firmware expects this to be directly 4193 * after the scan probe request 4194 */ 4195 chan = (struct wpi_scan_chan *)frm; 4196 chan->chan = ieee80211_chan2ieee(ic, c); 4197 chan->flags = 0; 4198 if (nssid) { 4199 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4200 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4201 } else 4202 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4203 4204 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4205 chan->flags |= WPI_CHAN_ACTIVE; 4206 4207 /* 4208 * Calculate the active/passive dwell times. 4209 */ 4210 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4211 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4212 4213 /* Make sure they're valid. 
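 * (i.e. never let the active dwell exceed the passive dwell computed above).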
*/ 4214 if (dwell_active > dwell_passive) 4215 dwell_active = dwell_passive; 4216 4217 chan->active = htole16(dwell_active); 4218 chan->passive = htole16(dwell_passive); 4219 4220 chan->dsp_gain = 0x6e; /* Default level */ 4221 4222 if (IEEE80211_IS_CHAN_5GHZ(c)) 4223 chan->rf_gain = 0x3b; 4224 else 4225 chan->rf_gain = 0x28; 4226 4227 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4228 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4229 4230 hdr->nchan++; 4231 4232 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4233 /* XXX Force probe request transmission. */ 4234 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4235 4236 chan++; 4237 4238 /* Reduce unnecessary delay. */ 4239 chan->flags = 0; 4240 chan->passive = chan->active = hdr->quiet_time; 4241 4242 hdr->nchan++; 4243 } 4244 4245 chan++; 4246 4247 buflen = (uint8_t *)chan - buf; 4248 hdr->len = htole16(buflen); 4249 4250 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4251 hdr->nchan); 4252 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4253 free(buf, M_DEVBUF); 4254 4255 if (error != 0) 4256 goto fail; 4257 4258 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4259 4260 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4261 4262 return 0; 4263 4264 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4265 4266 return error; 4267 } 4268 4269 static int 4270 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4271 { 4272 struct ieee80211com *ic = vap->iv_ic; 4273 struct ieee80211_node *ni = vap->iv_bss; 4274 struct ieee80211_channel *c = ni->ni_chan; 4275 int error; 4276 4277 WPI_RXON_LOCK(sc); 4278 4279 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4280 4281 /* Update adapter configuration. */ 4282 sc->rxon.associd = 0; 4283 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4284 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4285 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4286 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4287 if (IEEE80211_IS_CHAN_2GHZ(c)) 4288 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4289 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4290 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4291 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4292 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4293 if (IEEE80211_IS_CHAN_A(c)) { 4294 sc->rxon.cck_mask = 0; 4295 sc->rxon.ofdm_mask = 0x15; 4296 } else if (IEEE80211_IS_CHAN_B(c)) { 4297 sc->rxon.cck_mask = 0x03; 4298 sc->rxon.ofdm_mask = 0; 4299 } else { 4300 /* Assume 802.11b/g. 
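 * cck_mask 0x0f presumably enables all four CCK rates and ofdm_mask 0x15
 * the basic 6/12/24 Mbps OFDM rates.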
*/ 4301 sc->rxon.cck_mask = 0x0f; 4302 sc->rxon.ofdm_mask = 0x15; 4303 } 4304 4305 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4306 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4307 sc->rxon.ofdm_mask); 4308 4309 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4310 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4311 __func__); 4312 } 4313 4314 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4315 4316 WPI_RXON_UNLOCK(sc); 4317 4318 return error; 4319 } 4320 4321 static int 4322 wpi_config_beacon(struct wpi_vap *wvp) 4323 { 4324 struct ieee80211vap *vap = &wvp->wv_vap; 4325 struct ieee80211com *ic = vap->iv_ic; 4326 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4327 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4328 struct wpi_softc *sc = ic->ic_softc; 4329 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4330 struct ieee80211_tim_ie *tie; 4331 struct mbuf *m; 4332 uint8_t *ptr; 4333 int error; 4334 4335 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4336 4337 WPI_VAP_LOCK_ASSERT(wvp); 4338 4339 cmd->len = htole16(bcn->m->m_pkthdr.len); 4340 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4341 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4342 4343 /* XXX seems to be unused */ 4344 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4345 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4346 ptr = mtod(bcn->m, uint8_t *); 4347 4348 cmd->tim = htole16(bo->bo_tim - ptr); 4349 cmd->timsz = tie->tim_len; 4350 } 4351 4352 /* Necessary for recursion in ieee80211_beacon_update(). */ 4353 m = bcn->m; 4354 bcn->m = m_dup(m, M_NOWAIT); 4355 if (bcn->m == NULL) { 4356 device_printf(sc->sc_dev, 4357 "%s: could not copy beacon frame\n", __func__); 4358 error = ENOMEM; 4359 goto end; 4360 } 4361 4362 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4363 device_printf(sc->sc_dev, 4364 "%s: could not update beacon frame, error %d", __func__, 4365 error); 4366 m_freem(bcn->m); 4367 } 4368 4369 /* Restore mbuf. 
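 * The duplicate handed to wpi_cmd2() above is presumably consumed by the
 * TX path on success (it is freed here only on failure); the original
 * beacon mbuf is put back so later ieee80211_beacon_update() calls keep
 * working.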
*/ 4370 end: bcn->m = m; 4371 4372 return error; 4373 } 4374 4375 static int 4376 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4377 { 4378 struct ieee80211vap *vap = ni->ni_vap; 4379 struct wpi_vap *wvp = WPI_VAP(vap); 4380 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4381 struct mbuf *m; 4382 int error; 4383 4384 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4385 4386 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4387 return EINVAL; 4388 4389 m = ieee80211_beacon_alloc(ni); 4390 if (m == NULL) { 4391 device_printf(sc->sc_dev, 4392 "%s: could not allocate beacon frame\n", __func__); 4393 return ENOMEM; 4394 } 4395 4396 WPI_VAP_LOCK(wvp); 4397 if (bcn->m != NULL) 4398 m_freem(bcn->m); 4399 4400 bcn->m = m; 4401 4402 error = wpi_config_beacon(wvp); 4403 WPI_VAP_UNLOCK(wvp); 4404 4405 return error; 4406 } 4407 4408 static void 4409 wpi_update_beacon(struct ieee80211vap *vap, int item) 4410 { 4411 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4412 struct wpi_vap *wvp = WPI_VAP(vap); 4413 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4414 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4415 struct ieee80211_node *ni = vap->iv_bss; 4416 int mcast = 0; 4417 4418 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4419 4420 WPI_VAP_LOCK(wvp); 4421 if (bcn->m == NULL) { 4422 bcn->m = ieee80211_beacon_alloc(ni); 4423 if (bcn->m == NULL) { 4424 device_printf(sc->sc_dev, 4425 "%s: could not allocate beacon frame\n", __func__); 4426 4427 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4428 __func__); 4429 4430 WPI_VAP_UNLOCK(wvp); 4431 return; 4432 } 4433 } 4434 WPI_VAP_UNLOCK(wvp); 4435 4436 if (item == IEEE80211_BEACON_TIM) 4437 mcast = 1; /* TODO */ 4438 4439 setbit(bo->bo_flags, item); 4440 ieee80211_beacon_update(ni, bcn->m, mcast); 4441 4442 WPI_VAP_LOCK(wvp); 4443 wpi_config_beacon(wvp); 4444 WPI_VAP_UNLOCK(wvp); 4445 4446 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4447 } 4448 4449 static void 4450 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4451 { 4452 struct ieee80211vap *vap = ni->ni_vap; 4453 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4454 struct wpi_node *wn = WPI_NODE(ni); 4455 int error; 4456 4457 WPI_NT_LOCK(sc); 4458 4459 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4460 4461 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4462 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4463 device_printf(sc->sc_dev, 4464 "%s: could not add IBSS node, error %d\n", 4465 __func__, error); 4466 } 4467 } 4468 WPI_NT_UNLOCK(sc); 4469 } 4470 4471 static int 4472 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4473 { 4474 struct ieee80211com *ic = vap->iv_ic; 4475 struct ieee80211_node *ni = vap->iv_bss; 4476 struct ieee80211_channel *c = ni->ni_chan; 4477 int error; 4478 4479 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4480 4481 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4482 /* Link LED blinks while monitoring. */ 4483 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4484 return 0; 4485 } 4486 4487 /* XXX kernel panic workaround */ 4488 if (c == IEEE80211_CHAN_ANYC) { 4489 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4490 __func__); 4491 return EINVAL; 4492 } 4493 4494 if ((error = wpi_set_timing(sc, ni)) != 0) { 4495 device_printf(sc->sc_dev, 4496 "%s: could not set timing, error %d\n", __func__, error); 4497 return error; 4498 } 4499 4500 /* Update adapter configuration. 
*/ 4501 WPI_RXON_LOCK(sc); 4502 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4503 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4504 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4505 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4506 if (IEEE80211_IS_CHAN_2GHZ(c)) 4507 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4508 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4509 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4510 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4511 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4512 if (IEEE80211_IS_CHAN_A(c)) { 4513 sc->rxon.cck_mask = 0; 4514 sc->rxon.ofdm_mask = 0x15; 4515 } else if (IEEE80211_IS_CHAN_B(c)) { 4516 sc->rxon.cck_mask = 0x03; 4517 sc->rxon.ofdm_mask = 0; 4518 } else { 4519 /* Assume 802.11b/g. */ 4520 sc->rxon.cck_mask = 0x0f; 4521 sc->rxon.ofdm_mask = 0x15; 4522 } 4523 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4524 4525 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4526 sc->rxon.chan, sc->rxon.flags); 4527 4528 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4529 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4530 __func__); 4531 return error; 4532 } 4533 4534 /* Start periodic calibration timer. */ 4535 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4536 4537 WPI_RXON_UNLOCK(sc); 4538 4539 if (vap->iv_opmode == IEEE80211_M_IBSS || 4540 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4541 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4542 device_printf(sc->sc_dev, 4543 "%s: could not setup beacon, error %d\n", __func__, 4544 error); 4545 return error; 4546 } 4547 } 4548 4549 if (vap->iv_opmode == IEEE80211_M_STA) { 4550 /* Add BSS node. */ 4551 WPI_NT_LOCK(sc); 4552 error = wpi_add_sta_node(sc, ni); 4553 WPI_NT_UNLOCK(sc); 4554 if (error != 0) { 4555 device_printf(sc->sc_dev, 4556 "%s: could not add BSS node, error %d\n", __func__, 4557 error); 4558 return error; 4559 } 4560 } 4561 4562 /* Link LED always on while associated. */ 4563 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4564 4565 /* Enable power-saving mode if requested by user. 
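 * Level 3 is a fixed mid-range setting (see wpi_set_pslevel() above);
 * power saving is presumably skipped for IBSS because the interface must
 * stay awake to beacon.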
*/ 4566 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4567 vap->iv_opmode != IEEE80211_M_IBSS) 4568 (void)wpi_set_pslevel(sc, 0, 3, 1); 4569 4570 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4571 4572 return 0; 4573 } 4574 4575 static int 4576 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4577 { 4578 const struct ieee80211_cipher *cip = k->wk_cipher; 4579 struct ieee80211vap *vap = ni->ni_vap; 4580 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4581 struct wpi_node *wn = WPI_NODE(ni); 4582 struct wpi_node_info node; 4583 uint16_t kflags; 4584 int error; 4585 4586 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4587 4588 if (wpi_check_node_entry(sc, wn->id) == 0) { 4589 device_printf(sc->sc_dev, "%s: node does not exist\n", 4590 __func__); 4591 return 0; 4592 } 4593 4594 switch (cip->ic_cipher) { 4595 case IEEE80211_CIPHER_AES_CCM: 4596 kflags = WPI_KFLAG_CCMP; 4597 break; 4598 4599 default: 4600 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4601 cip->ic_cipher); 4602 return 0; 4603 } 4604 4605 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4606 if (k->wk_flags & IEEE80211_KEY_GROUP) 4607 kflags |= WPI_KFLAG_MULTICAST; 4608 4609 memset(&node, 0, sizeof node); 4610 node.id = wn->id; 4611 node.control = WPI_NODE_UPDATE; 4612 node.flags = WPI_FLAG_KEY_SET; 4613 node.kflags = htole16(kflags); 4614 memcpy(node.key, k->wk_key, k->wk_keylen); 4615 again: 4616 DPRINTF(sc, WPI_DEBUG_KEY, 4617 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4618 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4619 node.id, ether_sprintf(ni->ni_macaddr)); 4620 4621 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4622 if (error != 0) { 4623 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4624 error); 4625 return !error; 4626 } 4627 4628 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4629 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4630 kflags |= WPI_KFLAG_MULTICAST; 4631 node.kflags = htole16(kflags); 4632 4633 goto again; 4634 } 4635 4636 return 1; 4637 } 4638 4639 static void 4640 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4641 { 4642 const struct ieee80211_key *k = arg; 4643 struct ieee80211vap *vap = ni->ni_vap; 4644 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4645 struct wpi_node *wn = WPI_NODE(ni); 4646 int error; 4647 4648 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4649 return; 4650 4651 WPI_NT_LOCK(sc); 4652 error = wpi_load_key(ni, k); 4653 WPI_NT_UNLOCK(sc); 4654 4655 if (error == 0) { 4656 device_printf(sc->sc_dev, "%s: error while setting key\n", 4657 __func__); 4658 } 4659 } 4660 4661 static int 4662 wpi_set_global_keys(struct ieee80211_node *ni) 4663 { 4664 struct ieee80211vap *vap = ni->ni_vap; 4665 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4666 int error = 1; 4667 4668 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4669 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4670 error = wpi_load_key(ni, wk); 4671 4672 return !error; 4673 } 4674 4675 static int 4676 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4677 { 4678 struct ieee80211vap *vap = ni->ni_vap; 4679 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4680 struct wpi_node *wn = WPI_NODE(ni); 4681 struct wpi_node_info node; 4682 uint16_t kflags; 4683 int error; 4684 4685 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4686 4687 if (wpi_check_node_entry(sc, wn->id) == 0) { 4688 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4689 return 1; /* 
Nothing to do. */ 4690 } 4691 4692 kflags = WPI_KFLAG_KID(k->wk_keyix); 4693 if (k->wk_flags & IEEE80211_KEY_GROUP) 4694 kflags |= WPI_KFLAG_MULTICAST; 4695 4696 memset(&node, 0, sizeof node); 4697 node.id = wn->id; 4698 node.control = WPI_NODE_UPDATE; 4699 node.flags = WPI_FLAG_KEY_SET; 4700 node.kflags = htole16(kflags); 4701 again: 4702 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4703 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4704 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4705 4706 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4707 if (error != 0) { 4708 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4709 error); 4710 return !error; 4711 } 4712 4713 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4714 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4715 kflags |= WPI_KFLAG_MULTICAST; 4716 node.kflags = htole16(kflags); 4717 4718 goto again; 4719 } 4720 4721 return 1; 4722 } 4723 4724 static void 4725 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4726 { 4727 const struct ieee80211_key *k = arg; 4728 struct ieee80211vap *vap = ni->ni_vap; 4729 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4730 struct wpi_node *wn = WPI_NODE(ni); 4731 int error; 4732 4733 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4734 return; 4735 4736 WPI_NT_LOCK(sc); 4737 error = wpi_del_key(ni, k); 4738 WPI_NT_UNLOCK(sc); 4739 4740 if (error == 0) { 4741 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4742 __func__); 4743 } 4744 } 4745 4746 static int 4747 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4748 int set) 4749 { 4750 struct ieee80211com *ic = vap->iv_ic; 4751 struct wpi_softc *sc = ic->ic_softc; 4752 struct wpi_vap *wvp = WPI_VAP(vap); 4753 struct ieee80211_node *ni; 4754 int error, ni_ref = 0; 4755 4756 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4757 4758 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4759 /* Not for us. */ 4760 return 1; 4761 } 4762 4763 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4764 /* XMIT keys are handled in wpi_tx_data(). */ 4765 return 1; 4766 } 4767 4768 /* Handle group keys. */ 4769 if (&vap->iv_nw_keys[0] <= k && 4770 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4771 WPI_NT_LOCK(sc); 4772 if (set) 4773 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4774 else 4775 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4776 WPI_NT_UNLOCK(sc); 4777 4778 if (vap->iv_state == IEEE80211_S_RUN) { 4779 ieee80211_iterate_nodes(&ic->ic_sta, 4780 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4781 __DECONST(void *, k)); 4782 } 4783 4784 return 1; 4785 } 4786 4787 switch (vap->iv_opmode) { 4788 case IEEE80211_M_STA: 4789 ni = vap->iv_bss; 4790 break; 4791 4792 case IEEE80211_M_IBSS: 4793 case IEEE80211_M_AHDEMO: 4794 case IEEE80211_M_HOSTAP: 4795 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4796 if (ni == NULL) 4797 return 0; /* should not happen */ 4798 4799 ni_ref = 1; 4800 break; 4801 4802 default: 4803 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4804 vap->iv_opmode); 4805 return 0; 4806 } 4807 4808 WPI_NT_LOCK(sc); 4809 if (set) 4810 error = wpi_load_key(ni, k); 4811 else 4812 error = wpi_del_key(ni, k); 4813 WPI_NT_UNLOCK(sc); 4814 4815 if (ni_ref) 4816 ieee80211_node_decref(ni); 4817 4818 return error; 4819 } 4820 4821 static int 4822 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4823 { 4824 return wpi_process_key(vap, k, 1); 4825 } 4826 4827 static int 4828 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4829 { 4830 return wpi_process_key(vap, k, 0); 4831 } 4832 4833 /* 4834 * This function is called after the runtime firmware notifies us of its 4835 * readiness (called in a process context). 4836 */ 4837 static int 4838 wpi_post_alive(struct wpi_softc *sc) 4839 { 4840 int ntries, error; 4841 4842 /* Check (again) that the radio is not disabled. */ 4843 if ((error = wpi_nic_lock(sc)) != 0) 4844 return error; 4845 4846 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4847 4848 /* NB: Runtime firmware must be up and running. */ 4849 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4850 device_printf(sc->sc_dev, 4851 "RF switch: radio disabled (%s)\n", __func__); 4852 wpi_nic_unlock(sc); 4853 return EPERM; /* :-) */ 4854 } 4855 wpi_nic_unlock(sc); 4856 4857 /* Wait for thermal sensor to calibrate. */ 4858 for (ntries = 0; ntries < 1000; ntries++) { 4859 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4860 break; 4861 DELAY(10); 4862 } 4863 4864 if (ntries == 1000) { 4865 device_printf(sc->sc_dev, 4866 "timeout waiting for thermal sensor calibration\n"); 4867 return ETIMEDOUT; 4868 } 4869 4870 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4871 return 0; 4872 } 4873 4874 /* 4875 * The firmware boot code is small and is intended to be copied directly into 4876 * the NIC internal memory (no DMA transfer). 4877 */ 4878 static int 4879 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) 4880 { 4881 int error, ntries; 4882 4883 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4884 4885 size /= sizeof (uint32_t); 4886 4887 if ((error = wpi_nic_lock(sc)) != 0) 4888 return error; 4889 4890 /* Copy microcode image into NIC memory. */ 4891 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4892 (const uint32_t *)ucode, size); 4893 4894 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4895 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4896 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4897 4898 /* Start boot load now. */ 4899 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4900 4901 /* Wait for transfer to complete. */ 4902 for (ntries = 0; ntries < 1000; ntries++) { 4903 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4904 DPRINTF(sc, WPI_DEBUG_HW, 4905 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4906 WPI_FH_TX_STATUS_IDLE(6), 4907 status & WPI_FH_TX_STATUS_IDLE(6)); 4908 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4909 DPRINTF(sc, WPI_DEBUG_HW, 4910 "Status Match! 
- ntries = %d\n", ntries); 4911 break; 4912 } 4913 DELAY(10); 4914 } 4915 if (ntries == 1000) { 4916 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4917 __func__); 4918 wpi_nic_unlock(sc); 4919 return ETIMEDOUT; 4920 } 4921 4922 /* Enable boot after power up. */ 4923 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4924 4925 wpi_nic_unlock(sc); 4926 return 0; 4927 } 4928 4929 static int 4930 wpi_load_firmware(struct wpi_softc *sc) 4931 { 4932 struct wpi_fw_info *fw = &sc->fw; 4933 struct wpi_dma_info *dma = &sc->fw_dma; 4934 int error; 4935 4936 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4937 4938 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4939 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4940 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4941 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4942 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4943 4944 /* Tell adapter where to find initialization sections. */ 4945 if ((error = wpi_nic_lock(sc)) != 0) 4946 return error; 4947 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4948 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4949 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4950 dma->paddr + WPI_FW_DATA_MAXSZ); 4951 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4952 wpi_nic_unlock(sc); 4953 4954 /* Load firmware boot code. */ 4955 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4956 if (error != 0) { 4957 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4958 __func__); 4959 return error; 4960 } 4961 4962 /* Now press "execute". */ 4963 WPI_WRITE(sc, WPI_RESET, 0); 4964 4965 /* Wait at most one second for first alive notification. */ 4966 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4967 device_printf(sc->sc_dev, 4968 "%s: timeout waiting for adapter to initialize, error %d\n", 4969 __func__, error); 4970 return error; 4971 } 4972 4973 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4974 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4975 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4976 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4977 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4978 4979 /* Tell adapter where to find runtime sections. 
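 * Same BSM registers as for the init image above; the text size is or'ed
 * with WPI_FW_UPDATED, which presumably tells the BSM to (re)load the
 * runtime image, e.g. after a power-save sleep.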
*/ 4980 if ((error = wpi_nic_lock(sc)) != 0) 4981 return error; 4982 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4983 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4984 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4985 dma->paddr + WPI_FW_DATA_MAXSZ); 4986 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4987 WPI_FW_UPDATED | fw->main.textsz); 4988 wpi_nic_unlock(sc); 4989 4990 return 0; 4991 } 4992 4993 static int 4994 wpi_read_firmware(struct wpi_softc *sc) 4995 { 4996 const struct firmware *fp; 4997 struct wpi_fw_info *fw = &sc->fw; 4998 const struct wpi_firmware_hdr *hdr; 4999 int error; 5000 5001 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5002 5003 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5004 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5005 5006 WPI_UNLOCK(sc); 5007 fp = firmware_get(WPI_FW_NAME); 5008 WPI_LOCK(sc); 5009 5010 if (fp == NULL) { 5011 device_printf(sc->sc_dev, 5012 "could not load firmware image '%s'\n", WPI_FW_NAME); 5013 return EINVAL; 5014 } 5015 5016 sc->fw_fp = fp; 5017 5018 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5019 device_printf(sc->sc_dev, 5020 "firmware file too short: %zu bytes\n", fp->datasize); 5021 error = EINVAL; 5022 goto fail; 5023 } 5024 5025 fw->size = fp->datasize; 5026 fw->data = (const uint8_t *)fp->data; 5027 5028 /* Extract firmware header information. */ 5029 hdr = (const struct wpi_firmware_hdr *)fw->data; 5030 5031 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5032 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5033 5034 fw->main.textsz = le32toh(hdr->rtextsz); 5035 fw->main.datasz = le32toh(hdr->rdatasz); 5036 fw->init.textsz = le32toh(hdr->itextsz); 5037 fw->init.datasz = le32toh(hdr->idatasz); 5038 fw->boot.textsz = le32toh(hdr->btextsz); 5039 fw->boot.datasz = 0; 5040 5041 /* Sanity-check firmware header. */ 5042 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5043 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5044 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5045 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5046 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5047 (fw->boot.textsz & 3) != 0) { 5048 device_printf(sc->sc_dev, "invalid firmware header\n"); 5049 error = EINVAL; 5050 goto fail; 5051 } 5052 5053 /* Check that all firmware sections fit. */ 5054 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5055 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5056 device_printf(sc->sc_dev, 5057 "firmware file too short: %zu bytes\n", fw->size); 5058 error = EINVAL; 5059 goto fail; 5060 } 5061 5062 /* Get pointers to firmware sections. 
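 * The sections are stored back to back after the header, in the order shown
 * in the diagram above, so each pointer is simply the end of the preceding
 * section.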
*/ 5063 fw->main.text = (const uint8_t *)(hdr + 1); 5064 fw->main.data = fw->main.text + fw->main.textsz; 5065 fw->init.text = fw->main.data + fw->main.datasz; 5066 fw->init.data = fw->init.text + fw->init.textsz; 5067 fw->boot.text = fw->init.data + fw->init.datasz; 5068 5069 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5070 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5071 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5072 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5073 fw->main.textsz, fw->main.datasz, 5074 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5075 5076 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5077 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5078 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5079 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5080 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5081 5082 return 0; 5083 5084 fail: wpi_unload_firmware(sc); 5085 return error; 5086 } 5087 5088 /** 5089 * Free the referenced firmware image 5090 */ 5091 static void 5092 wpi_unload_firmware(struct wpi_softc *sc) 5093 { 5094 if (sc->fw_fp != NULL) { 5095 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5096 sc->fw_fp = NULL; 5097 } 5098 } 5099 5100 static int 5101 wpi_clock_wait(struct wpi_softc *sc) 5102 { 5103 int ntries; 5104 5105 /* Set "initialization complete" bit. */ 5106 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5107 5108 /* Wait for clock stabilization. */ 5109 for (ntries = 0; ntries < 2500; ntries++) { 5110 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5111 return 0; 5112 DELAY(100); 5113 } 5114 device_printf(sc->sc_dev, 5115 "%s: timeout waiting for clock stabilization\n", __func__); 5116 5117 return ETIMEDOUT; 5118 } 5119 5120 static int 5121 wpi_apm_init(struct wpi_softc *sc) 5122 { 5123 uint32_t reg; 5124 int error; 5125 5126 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5127 5128 /* Disable L0s exit timer (NMI bug workaround). */ 5129 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5130 /* Don't wait for ICH L0s (ICH bug workaround). */ 5131 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5132 5133 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5134 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5135 5136 /* Retrieve PCIe Active State Power Management (ASPM). */ 5137 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 5138 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5139 if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ 5140 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5141 else 5142 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5143 5144 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5145 5146 /* Wait for clock stabilization before accessing prph. */ 5147 if ((error = wpi_clock_wait(sc)) != 0) 5148 return error; 5149 5150 if ((error = wpi_nic_lock(sc)) != 0) 5151 return error; 5152 /* Cleanup. */ 5153 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5154 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5155 5156 /* Enable DMA and BSM (Bootstrap State Machine). */ 5157 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5158 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5159 DELAY(20); 5160 /* Disable L1-Active. 
*/ 5161 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5162 wpi_nic_unlock(sc); 5163 5164 return 0; 5165 } 5166 5167 static void 5168 wpi_apm_stop_master(struct wpi_softc *sc) 5169 { 5170 int ntries; 5171 5172 /* Stop busmaster DMA activity. */ 5173 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5174 5175 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5176 WPI_GP_CNTRL_MAC_PS) 5177 return; /* Already asleep. */ 5178 5179 for (ntries = 0; ntries < 100; ntries++) { 5180 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5181 return; 5182 DELAY(10); 5183 } 5184 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5185 __func__); 5186 } 5187 5188 static void 5189 wpi_apm_stop(struct wpi_softc *sc) 5190 { 5191 wpi_apm_stop_master(sc); 5192 5193 /* Reset the entire device. */ 5194 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5195 DELAY(10); 5196 /* Clear "initialization complete" bit. */ 5197 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5198 } 5199 5200 static void 5201 wpi_nic_config(struct wpi_softc *sc) 5202 { 5203 uint32_t rev; 5204 5205 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5206 5207 /* voodoo from the Linux "driver".. */ 5208 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5209 if ((rev & 0xc0) == 0x40) 5210 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5211 else if (!(rev & 0x80)) 5212 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5213 5214 if (sc->cap == 0x80) 5215 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5216 5217 if ((sc->rev & 0xf0) == 0xd0) 5218 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5219 else 5220 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5221 5222 if (sc->type > 1) 5223 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5224 } 5225 5226 static int 5227 wpi_hw_init(struct wpi_softc *sc) 5228 { 5229 uint8_t chnl; 5230 int ntries, error; 5231 5232 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5233 5234 /* Clear pending interrupts. */ 5235 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5236 5237 if ((error = wpi_apm_init(sc)) != 0) { 5238 device_printf(sc->sc_dev, 5239 "%s: could not power ON adapter, error %d\n", __func__, 5240 error); 5241 return error; 5242 } 5243 5244 /* Select VMAIN power source. */ 5245 if ((error = wpi_nic_lock(sc)) != 0) 5246 return error; 5247 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5248 wpi_nic_unlock(sc); 5249 /* Spin until VMAIN gets selected. */ 5250 for (ntries = 0; ntries < 5000; ntries++) { 5251 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5252 break; 5253 DELAY(10); 5254 } 5255 if (ntries == 5000) { 5256 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5257 return ETIMEDOUT; 5258 } 5259 5260 /* Perform adapter initialization. */ 5261 wpi_nic_config(sc); 5262 5263 /* Initialize RX ring. */ 5264 if ((error = wpi_nic_lock(sc)) != 0) 5265 return error; 5266 /* Set physical address of RX ring. */ 5267 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5268 /* Set physical address of RX read pointer. */ 5269 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5270 offsetof(struct wpi_shared, next)); 5271 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5272 /* Enable RX. 
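 * WPI_FH_RX_CONFIG_DMA_ENA, _RDRBD_ENA and _WRSTATUS_ENA below presumably
 * enable the RX DMA engine, RBD fetching and status write-back; the ring
 * size is passed as a log2 count via WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG).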
*/ 5273 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5274 WPI_FH_RX_CONFIG_DMA_ENA | 5275 WPI_FH_RX_CONFIG_RDRBD_ENA | 5276 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5277 WPI_FH_RX_CONFIG_MAXFRAG | 5278 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5279 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5280 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5281 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5282 wpi_nic_unlock(sc); 5283 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5284 5285 /* Initialize TX rings. */ 5286 if ((error = wpi_nic_lock(sc)) != 0) 5287 return error; 5288 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5289 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5290 /* Enable all 6 TX rings. */ 5291 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5292 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5293 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5294 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5295 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5296 /* Set physical address of TX rings. */ 5297 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5298 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5299 5300 /* Enable all DMA channels. */ 5301 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5302 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5303 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5304 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5305 } 5306 wpi_nic_unlock(sc); 5307 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5308 5309 /* Clear "radio off" and "commands blocked" bits. */ 5310 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5311 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5312 5313 /* Clear pending interrupts. */ 5314 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5315 /* Enable interrupts. */ 5316 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5317 5318 /* _Really_ make sure "radio off" bit is cleared! */ 5319 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5320 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5321 5322 if ((error = wpi_load_firmware(sc)) != 0) { 5323 device_printf(sc->sc_dev, 5324 "%s: could not load firmware, error %d\n", __func__, 5325 error); 5326 return error; 5327 } 5328 /* Wait at most one second for firmware alive notification. */ 5329 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5330 device_printf(sc->sc_dev, 5331 "%s: timeout waiting for adapter to initialize, error %d\n", 5332 __func__, error); 5333 return error; 5334 } 5335 5336 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5337 5338 /* Do post-firmware initialization. */ 5339 return wpi_post_alive(sc); 5340 } 5341 5342 static void 5343 wpi_hw_stop(struct wpi_softc *sc) 5344 { 5345 uint8_t chnl, qid; 5346 int ntries; 5347 5348 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5349 5350 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5351 wpi_nic_lock(sc); 5352 5353 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5354 5355 /* Disable interrupts. */ 5356 WPI_WRITE(sc, WPI_INT_MASK, 0); 5357 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5358 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5359 5360 /* Make sure we no longer hold the NIC lock. */ 5361 wpi_nic_unlock(sc); 5362 5363 if (wpi_nic_lock(sc) == 0) { 5364 /* Stop TX scheduler. */ 5365 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5366 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5367 5368 /* Stop all DMA channels. 
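 * Each channel's TX config is cleared and WPI_FH_TX_STATUS is polled until
 * the channel reports idle, for at most 200 * 10us (about 2ms) per channel.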
*/ 5369 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5370 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5371 for (ntries = 0; ntries < 200; ntries++) { 5372 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5373 WPI_FH_TX_STATUS_IDLE(chnl)) 5374 break; 5375 DELAY(10); 5376 } 5377 } 5378 wpi_nic_unlock(sc); 5379 } 5380 5381 /* Stop RX ring. */ 5382 wpi_reset_rx_ring(sc); 5383 5384 /* Reset all TX rings. */ 5385 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 5386 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5387 5388 if (wpi_nic_lock(sc) == 0) { 5389 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5390 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5391 wpi_nic_unlock(sc); 5392 } 5393 DELAY(5); 5394 /* Power OFF adapter. */ 5395 wpi_apm_stop(sc); 5396 } 5397 5398 static void 5399 wpi_radio_on(void *arg0, int pending) 5400 { 5401 struct wpi_softc *sc = arg0; 5402 struct ieee80211com *ic = &sc->sc_ic; 5403 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5404 5405 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5406 5407 WPI_LOCK(sc); 5408 callout_stop(&sc->watchdog_rfkill); 5409 WPI_UNLOCK(sc); 5410 5411 if (vap != NULL) 5412 ieee80211_init(vap); 5413 } 5414 5415 static void 5416 wpi_radio_off(void *arg0, int pending) 5417 { 5418 struct wpi_softc *sc = arg0; 5419 struct ieee80211com *ic = &sc->sc_ic; 5420 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5421 5422 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5423 5424 ieee80211_notify_radio(ic, 0); 5425 wpi_stop(sc); 5426 if (vap != NULL) 5427 ieee80211_stop(vap); 5428 5429 WPI_LOCK(sc); 5430 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5431 WPI_UNLOCK(sc); 5432 } 5433 5434 static int 5435 wpi_init(struct wpi_softc *sc) 5436 { 5437 int error = 0; 5438 5439 WPI_LOCK(sc); 5440 5441 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5442 5443 if (sc->sc_running != 0) 5444 goto end; 5445 5446 /* Check that the radio is not disabled by hardware switch. */ 5447 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5448 device_printf(sc->sc_dev, 5449 "RF switch: radio disabled (%s)\n", __func__); 5450 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5451 sc); 5452 error = EINPROGRESS; 5453 goto end; 5454 } 5455 5456 /* Read firmware images from the filesystem. */ 5457 if ((error = wpi_read_firmware(sc)) != 0) { 5458 device_printf(sc->sc_dev, 5459 "%s: could not read firmware, error %d\n", __func__, 5460 error); 5461 goto end; 5462 } 5463 5464 sc->sc_running = 1; 5465 5466 /* Initialize hardware and upload firmware. */ 5467 error = wpi_hw_init(sc); 5468 wpi_unload_firmware(sc); 5469 if (error != 0) { 5470 device_printf(sc->sc_dev, 5471 "%s: could not initialize hardware, error %d\n", __func__, 5472 error); 5473 goto fail; 5474 } 5475 5476 /* Configure adapter now that it is ready. 
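 * wpi_config() (see above) sends the initial power-save, bluetooth
 * coexistence, RXON and MRR commands.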
*/
5477 if ((error = wpi_config(sc)) != 0) {
5478 device_printf(sc->sc_dev,
5479 "%s: could not configure device, error %d\n", __func__,
5480 error);
5481 goto fail;
5482 }
5483
5484 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5485
5486 WPI_UNLOCK(sc);
5487
5488 return 0;
5489
5490 fail: wpi_stop_locked(sc);
5491
5492 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
5493 WPI_UNLOCK(sc);
5494
5495 return error;
5496 }
5497
5498 static void
5499 wpi_stop_locked(struct wpi_softc *sc)
5500 {
5501
5502 WPI_LOCK_ASSERT(sc);
5503
5504 if (sc->sc_running == 0)
5505 return;
5506
5507 WPI_TX_LOCK(sc);
5508 WPI_TXQ_LOCK(sc);
5509 sc->sc_running = 0;
5510 WPI_TXQ_UNLOCK(sc);
5511 WPI_TX_UNLOCK(sc);
5512
5513 WPI_TXQ_STATE_LOCK(sc);
5514 callout_stop(&sc->tx_timeout);
5515 WPI_TXQ_STATE_UNLOCK(sc);
5516
5517 WPI_RXON_LOCK(sc);
5518 callout_stop(&sc->scan_timeout);
5519 callout_stop(&sc->calib_to);
5520 WPI_RXON_UNLOCK(sc);
5521
5522 /* Power OFF hardware. */
5523 wpi_hw_stop(sc);
5524 }
5525
5526 static void
5527 wpi_stop(struct wpi_softc *sc)
5528 {
5529 WPI_LOCK(sc);
5530 wpi_stop_locked(sc);
5531 WPI_UNLOCK(sc);
5532 }
5533
5534 /*
5535 * Callback from net80211 to start a scan.
5536 */
5537 static void
5538 wpi_scan_start(struct ieee80211com *ic)
5539 {
5540 struct wpi_softc *sc = ic->ic_softc;
5541
5542 wpi_set_led(sc, WPI_LED_LINK, 20, 2);
5543 }
5544
5545 /*
5546 * Callback from net80211 to terminate a scan.
5547 */
5548 static void
5549 wpi_scan_end(struct ieee80211com *ic)
5550 {
5551 struct wpi_softc *sc = ic->ic_softc;
5552 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5553
5554 if (vap->iv_state == IEEE80211_S_RUN)
5555 wpi_set_led(sc, WPI_LED_LINK, 0, 1);
5556 }
5557
5558 /**
5559 * Called by the net80211 framework to indicate to the driver
5560 * that the channel should be changed.
5561 */
5562 static void
5563 wpi_set_channel(struct ieee80211com *ic)
5564 {
5565 const struct ieee80211_channel *c = ic->ic_curchan;
5566 struct wpi_softc *sc = ic->ic_softc;
5567 int error;
5568
5569 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5570
5571 WPI_LOCK(sc);
5572 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
5573 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
5574 WPI_UNLOCK(sc);
5575 WPI_TX_LOCK(sc);
5576 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
5577 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
5578 WPI_TX_UNLOCK(sc);
5579
5580 /*
5581 * Only need to set the channel in Monitor mode. AP scanning and auth
5582 * are already taken care of by their respective firmware commands.
5583 */
5584 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5585 WPI_RXON_LOCK(sc);
5586 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
5587 if (IEEE80211_IS_CHAN_2GHZ(c)) {
5588 sc->rxon.flags |= htole32(WPI_RXON_AUTO |
5589 WPI_RXON_24GHZ);
5590 } else {
5591 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
5592 WPI_RXON_24GHZ);
5593 }
5594 if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
5595 device_printf(sc->sc_dev,
5596 "%s: error %d setting channel\n", __func__,
5597 error);
5598 WPI_RXON_UNLOCK(sc);
5599 }
5600 }
5601
5602 /**
5603 * Called by net80211 to indicate that we need to scan the current
5604 * channel. The channel was previously set via the wpi_set_channel
5605 * callback.
5606 */ 5607 static void 5608 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5609 { 5610 struct ieee80211vap *vap = ss->ss_vap; 5611 struct ieee80211com *ic = vap->iv_ic; 5612 struct wpi_softc *sc = ic->ic_softc; 5613 int error; 5614 5615 WPI_RXON_LOCK(sc); 5616 error = wpi_scan(sc, ic->ic_curchan); 5617 WPI_RXON_UNLOCK(sc); 5618 if (error != 0) 5619 ieee80211_cancel_scan(vap); 5620 } 5621 5622 /** 5623 * Called by the net80211 framework to indicate 5624 * the minimum dwell time has been met, terminate the scan. 5625 * We don't actually terminate the scan as the firmware will notify 5626 * us when it's finished and we have no way to interrupt it. 5627 */ 5628 static void 5629 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5630 { 5631 /* NB: don't try to abort scan; wait for firmware to finish */ 5632 } 5633 5634 static void 5635 wpi_hw_reset(void *arg, int pending) 5636 { 5637 struct wpi_softc *sc = arg; 5638 struct ieee80211com *ic = &sc->sc_ic; 5639 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5640 5641 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5642 5643 ieee80211_notify_radio(ic, 0); 5644 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN)) 5645 ieee80211_cancel_scan(vap); 5646 5647 wpi_stop(sc); 5648 if (vap != NULL) { 5649 ieee80211_stop(vap); 5650 ieee80211_init(vap); 5651 } 5652 } 5653