/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 * Copyright (c) 2015 Andriy Voskoboinyk <avos@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware as many
 * other adapters do.  Instead, at run time the eeprom is set into a known
 * state and told to load boot firmware.  The boot firmware loads an init
 * and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and the hardware communicate
 * through circular DMA rings in SRAM with the firmware.
 *
 * There are six memory rings: one command ring, one rx data ring and four
 * tx data rings.  The four tx data rings allow for QoS prioritization.
 *
 * The rx data ring consists of 32 DMA buffers.  Two registers are used to
 * indicate where in the ring the driver and the firmware are up to.  The
 * driver sets the initial read index (reg1) and the initial write index
 * (reg2); the firmware updates the read index (reg1) on rx of a packet and
 * fires an interrupt.  The driver then processes the buffers starting at
 * reg1, indicating to the firmware which buffers have been processed by
 * updating reg2, and allocating new memory for each processed buffer at
 * the same time.
 *
 * A similar thing happens with the tx rings.  The difference is that the
 * firmware stops processing buffers once the queue is full, and resumes
 * only after confirmation of a successful transmission (tx_done) has
 * occurred.
 *
 * The command ring operates in the same manner as the tx queues.
 *
 * All communication directly to the card (i.e. the eeprom) is classed as
 * Stage 1 communication.
 *
 * All communication via the firmware to the card is classed as Stage 2.
 * The firmware consists of two parts: a bootstrap firmware and a runtime
 * firmware.  The bootstrap firmware and runtime firmware are loaded from
 * host memory via DMA to the card and then told to execute.  From this
 * point on, the majority of communication between the driver and the card
 * goes via the firmware.
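 *
 * As a rough illustration of the rx ring handshake described above (a
 * simplified sketch only, not the code used later in this file; the index
 * and register names are placeholders):
 *
 *	while (drv_idx != fw_idx) {		// firmware advanced reg1
 *		process(ring->desc[drv_idx]);	// hand the frame up
 *		ring->desc[drv_idx] = alloc_buf(); // replace consumed buffer
 *		drv_idx = (drv_idx + 1) % WPI_RX_RING_COUNT;
 *	}
 *	write_reg(sc, reg2, drv_idx);		// tell firmware what we consumed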
 */

#include "opt_wlan.h"
#include "opt_wpi.h"

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
#include <dev/wpi/if_wpi_debug.h>

struct wpi_ident {
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subdevice;
	const char	*name;
};

static const struct wpi_ident wpi_ident_table[] = {
	/* The below entries support ABG regardless of the subid */
	{ 0x8086, 0x4222,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	{ 0x8086, 0x4227,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	/* The below entries only support BG */
	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
	{ 0, 0, 0, NULL }
};

static int	wpi_probe(device_t);
static int	wpi_attach(device_t);
static void	wpi_radiotap_attach(struct wpi_softc *);
static void	wpi_sysctlattach(struct wpi_softc *);
static void	wpi_init_beacon(struct wpi_vap *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	wpi_vap_delete(struct ieee80211vap *);
static int	wpi_detach(device_t);
static int	wpi_shutdown(device_t);
static int	wpi_suspend(device_t);
static int	wpi_resume(device_t);
static int	wpi_nic_lock(struct wpi_softc *);
static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	wpi_dma_contig_free(struct wpi_dma_info *);
static int	wpi_alloc_shared(struct wpi_softc *);
static void	wpi_free_shared(struct wpi_softc *);
static int	wpi_alloc_fwmem(struct wpi_softc *);
static void	wpi_free_fwmem(struct wpi_softc *);
static int	wpi_alloc_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring_ps(struct wpi_softc *);
static void	wpi_reset_rx_ring(struct wpi_softc *);
static void	wpi_free_rx_ring(struct wpi_softc *);
static int	wpi_alloc_tx_ring(struct wpi_softc *,
struct wpi_tx_ring *, 160 uint8_t); 161 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 162 static void wpi_update_tx_ring_ps(struct wpi_softc *, 163 struct wpi_tx_ring *); 164 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 166 static int wpi_read_eeprom(struct wpi_softc *, 167 uint8_t macaddr[IEEE80211_ADDR_LEN]); 168 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 169 static void wpi_read_eeprom_band(struct wpi_softc *, uint8_t, int, int *, 170 struct ieee80211_channel[]); 171 static int wpi_read_eeprom_channels(struct wpi_softc *, uint8_t); 172 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 173 struct ieee80211_channel *); 174 static void wpi_getradiocaps(struct ieee80211com *, int, int *, 175 struct ieee80211_channel[]); 176 static int wpi_setregdomain(struct ieee80211com *, 177 struct ieee80211_regdomain *, int, 178 struct ieee80211_channel[]); 179 static int wpi_read_eeprom_group(struct wpi_softc *, uint8_t); 180 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 181 const uint8_t mac[IEEE80211_ADDR_LEN]); 182 static void wpi_node_free(struct ieee80211_node *); 183 static void wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 184 const struct ieee80211_rx_stats *, 185 int, int); 186 static void wpi_restore_node(void *, struct ieee80211_node *); 187 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 188 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 189 static void wpi_calib_timeout(void *); 190 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 191 struct wpi_rx_data *); 192 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 193 struct wpi_rx_data *); 194 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 195 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 196 static void wpi_notif_intr(struct wpi_softc *); 197 static void wpi_wakeup_intr(struct wpi_softc *); 198 #ifdef WPI_DEBUG 199 static void wpi_debug_registers(struct wpi_softc *); 200 #endif 201 static void wpi_fatal_intr(struct wpi_softc *); 202 static void wpi_intr(void *); 203 static void wpi_free_txfrags(struct wpi_softc *, uint16_t); 204 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 205 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 206 struct ieee80211_node *); 207 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 208 struct ieee80211_node *, 209 const struct ieee80211_bpf_params *); 210 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 211 const struct ieee80211_bpf_params *); 212 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 213 static void wpi_watchdog_rfkill(void *); 214 static void wpi_scan_timeout(void *); 215 static void wpi_tx_timeout(void *); 216 static void wpi_parent(struct ieee80211com *); 217 static int wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t, 218 int); 219 static int wpi_mrr_setup(struct wpi_softc *); 220 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 221 static int wpi_add_broadcast_node(struct wpi_softc *, int); 222 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 223 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 224 static int wpi_updateedca(struct ieee80211com *); 225 static void wpi_set_promisc(struct wpi_softc *); 226 
static void wpi_update_promisc(struct ieee80211com *); 227 static void wpi_update_mcast(struct ieee80211com *); 228 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 229 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 230 static void wpi_power_calibration(struct wpi_softc *); 231 static int wpi_set_txpower(struct wpi_softc *, int); 232 static int wpi_get_power_index(struct wpi_softc *, 233 struct wpi_power_group *, uint8_t, int, int); 234 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 235 static int wpi_send_btcoex(struct wpi_softc *); 236 static int wpi_send_rxon(struct wpi_softc *, int, int); 237 static int wpi_config(struct wpi_softc *); 238 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 239 struct ieee80211_channel *, uint8_t); 240 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 241 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 242 struct ieee80211_channel *); 243 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 244 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 245 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 246 static int wpi_config_beacon(struct wpi_vap *); 247 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 248 static void wpi_update_beacon(struct ieee80211vap *, int); 249 static void wpi_newassoc(struct ieee80211_node *, int); 250 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 251 static int wpi_load_key(struct ieee80211_node *, 252 const struct ieee80211_key *); 253 static void wpi_load_key_cb(void *, struct ieee80211_node *); 254 static int wpi_set_global_keys(struct ieee80211_node *); 255 static int wpi_del_key(struct ieee80211_node *, 256 const struct ieee80211_key *); 257 static void wpi_del_key_cb(void *, struct ieee80211_node *); 258 static int wpi_process_key(struct ieee80211vap *, 259 const struct ieee80211_key *, int); 260 static int wpi_key_set(struct ieee80211vap *, 261 const struct ieee80211_key *); 262 static int wpi_key_delete(struct ieee80211vap *, 263 const struct ieee80211_key *); 264 static int wpi_post_alive(struct wpi_softc *); 265 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, 266 uint32_t); 267 static int wpi_load_firmware(struct wpi_softc *); 268 static int wpi_read_firmware(struct wpi_softc *); 269 static void wpi_unload_firmware(struct wpi_softc *); 270 static int wpi_clock_wait(struct wpi_softc *); 271 static int wpi_apm_init(struct wpi_softc *); 272 static void wpi_apm_stop_master(struct wpi_softc *); 273 static void wpi_apm_stop(struct wpi_softc *); 274 static void wpi_nic_config(struct wpi_softc *); 275 static int wpi_hw_init(struct wpi_softc *); 276 static void wpi_hw_stop(struct wpi_softc *); 277 static void wpi_radio_on(void *, int); 278 static void wpi_radio_off(void *, int); 279 static int wpi_init(struct wpi_softc *); 280 static void wpi_stop_locked(struct wpi_softc *); 281 static void wpi_stop(struct wpi_softc *); 282 static void wpi_scan_start(struct ieee80211com *); 283 static void wpi_scan_end(struct ieee80211com *); 284 static void wpi_set_channel(struct ieee80211com *); 285 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 286 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 287 static void wpi_hw_reset(void *, int); 288 289 static device_method_t wpi_methods[] = { 290 /* Device interface */ 291 DEVMETHOD(device_probe, wpi_probe), 292 DEVMETHOD(device_attach, wpi_attach), 293 
DEVMETHOD(device_detach, wpi_detach), 294 DEVMETHOD(device_shutdown, wpi_shutdown), 295 DEVMETHOD(device_suspend, wpi_suspend), 296 DEVMETHOD(device_resume, wpi_resume), 297 298 DEVMETHOD_END 299 }; 300 301 static driver_t wpi_driver = { 302 "wpi", 303 wpi_methods, 304 sizeof (struct wpi_softc) 305 }; 306 static devclass_t wpi_devclass; 307 308 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 309 310 MODULE_VERSION(wpi, 1); 311 312 MODULE_DEPEND(wpi, pci, 1, 1, 1); 313 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 314 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 315 316 static int 317 wpi_probe(device_t dev) 318 { 319 const struct wpi_ident *ident; 320 321 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 322 if (pci_get_vendor(dev) == ident->vendor && 323 pci_get_device(dev) == ident->device) { 324 device_set_desc(dev, ident->name); 325 return (BUS_PROBE_DEFAULT); 326 } 327 } 328 return ENXIO; 329 } 330 331 static int 332 wpi_attach(device_t dev) 333 { 334 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 335 struct ieee80211com *ic; 336 uint8_t i; 337 int error, rid; 338 #ifdef WPI_DEBUG 339 int supportsa = 1; 340 const struct wpi_ident *ident; 341 #endif 342 343 sc->sc_dev = dev; 344 345 #ifdef WPI_DEBUG 346 error = resource_int_value(device_get_name(sc->sc_dev), 347 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 348 if (error != 0) 349 sc->sc_debug = 0; 350 #else 351 sc->sc_debug = 0; 352 #endif 353 354 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 355 356 /* 357 * Get the offset of the PCI Express Capability Structure in PCI 358 * Configuration Space. 359 */ 360 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 361 if (error != 0) { 362 device_printf(dev, "PCIe capability structure not found!\n"); 363 return error; 364 } 365 366 /* 367 * Some card's only support 802.11b/g not a, check to see if 368 * this is one such card. A 0x0 in the subdevice table indicates 369 * the entire subdevice range is to be ignored. 370 */ 371 #ifdef WPI_DEBUG 372 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 373 if (ident->subdevice && 374 pci_get_subdevice(dev) == ident->subdevice) { 375 supportsa = 0; 376 break; 377 } 378 } 379 #endif 380 381 /* Clear device-specific "PCI retry timeout" register (41h). */ 382 pci_write_config(dev, 0x41, 0, 1); 383 384 /* Enable bus-mastering. */ 385 pci_enable_busmaster(dev); 386 387 rid = PCIR_BAR(0); 388 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 389 RF_ACTIVE); 390 if (sc->mem == NULL) { 391 device_printf(dev, "can't map mem space\n"); 392 return ENOMEM; 393 } 394 sc->sc_st = rman_get_bustag(sc->mem); 395 sc->sc_sh = rman_get_bushandle(sc->mem); 396 397 rid = 1; 398 if (pci_alloc_msi(dev, &rid) == 0) 399 rid = 1; 400 else 401 rid = 0; 402 /* Install interrupt handler. */ 403 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 404 (rid != 0 ? 0 : RF_SHAREABLE)); 405 if (sc->irq == NULL) { 406 device_printf(dev, "can't map interrupt\n"); 407 error = ENOMEM; 408 goto fail; 409 } 410 411 WPI_LOCK_INIT(sc); 412 WPI_TX_LOCK_INIT(sc); 413 WPI_RXON_LOCK_INIT(sc); 414 WPI_NT_LOCK_INIT(sc); 415 WPI_TXQ_LOCK_INIT(sc); 416 WPI_TXQ_STATE_LOCK_INIT(sc); 417 418 /* Allocate DMA memory for firmware transfers. */ 419 if ((error = wpi_alloc_fwmem(sc)) != 0) { 420 device_printf(dev, 421 "could not allocate memory for firmware, error %d\n", 422 error); 423 goto fail; 424 } 425 426 /* Allocate shared page. 
*/ 427 if ((error = wpi_alloc_shared(sc)) != 0) { 428 device_printf(dev, "could not allocate shared page\n"); 429 goto fail; 430 } 431 432 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 433 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 434 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 435 device_printf(dev, 436 "could not allocate TX ring %d, error %d\n", i, 437 error); 438 goto fail; 439 } 440 } 441 442 /* Allocate RX ring. */ 443 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 444 device_printf(dev, "could not allocate RX ring, error %d\n", 445 error); 446 goto fail; 447 } 448 449 /* Clear pending interrupts. */ 450 WPI_WRITE(sc, WPI_INT, 0xffffffff); 451 452 ic = &sc->sc_ic; 453 ic->ic_softc = sc; 454 ic->ic_name = device_get_nameunit(dev); 455 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 456 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 457 458 /* Set device capabilities. */ 459 ic->ic_caps = 460 IEEE80211_C_STA /* station mode supported */ 461 | IEEE80211_C_IBSS /* IBSS mode supported */ 462 | IEEE80211_C_HOSTAP /* Host access point mode */ 463 | IEEE80211_C_MONITOR /* monitor mode supported */ 464 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 465 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 466 | IEEE80211_C_TXFRAG /* handle tx frags */ 467 | IEEE80211_C_TXPMGT /* tx power management */ 468 | IEEE80211_C_SHSLOT /* short slot time supported */ 469 | IEEE80211_C_WPA /* 802.11i */ 470 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 471 | IEEE80211_C_WME /* 802.11e */ 472 | IEEE80211_C_PMGT /* Station-side power mgmt */ 473 ; 474 475 ic->ic_cryptocaps = 476 IEEE80211_CRYPTO_AES_CCM; 477 478 /* 479 * Read in the eeprom and also setup the channels for 480 * net80211. We don't set the rates as net80211 does this for us 481 */ 482 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 483 device_printf(dev, "could not read EEPROM, error %d\n", 484 error); 485 goto fail; 486 } 487 488 #ifdef WPI_DEBUG 489 if (bootverbose) { 490 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 491 sc->domain); 492 device_printf(sc->sc_dev, "Hardware Type: %c\n", 493 sc->type > 1 ? 'B': '?'); 494 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 495 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 496 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 497 supportsa ? "does" : "does not"); 498 499 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 500 check what sc->rev really represents - benjsc 20070615 */ 501 } 502 #endif 503 504 ieee80211_ifattach(ic); 505 ic->ic_vap_create = wpi_vap_create; 506 ic->ic_vap_delete = wpi_vap_delete; 507 ic->ic_parent = wpi_parent; 508 ic->ic_raw_xmit = wpi_raw_xmit; 509 ic->ic_transmit = wpi_transmit; 510 ic->ic_node_alloc = wpi_node_alloc; 511 sc->sc_node_free = ic->ic_node_free; 512 ic->ic_node_free = wpi_node_free; 513 ic->ic_wme.wme_update = wpi_updateedca; 514 ic->ic_update_promisc = wpi_update_promisc; 515 ic->ic_update_mcast = wpi_update_mcast; 516 ic->ic_newassoc = wpi_newassoc; 517 ic->ic_scan_start = wpi_scan_start; 518 ic->ic_scan_end = wpi_scan_end; 519 ic->ic_set_channel = wpi_set_channel; 520 ic->ic_scan_curchan = wpi_scan_curchan; 521 ic->ic_scan_mindwell = wpi_scan_mindwell; 522 ic->ic_getradiocaps = wpi_getradiocaps; 523 ic->ic_setregdomain = wpi_setregdomain; 524 525 sc->sc_update_rx_ring = wpi_update_rx_ring; 526 sc->sc_update_tx_ring = wpi_update_tx_ring; 527 528 wpi_radiotap_attach(sc); 529 530 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 531 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 532 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 533 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 534 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 535 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 536 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 537 538 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 539 taskqueue_thread_enqueue, &sc->sc_tq); 540 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 541 if (error != 0) { 542 device_printf(dev, "can't start threads, error %d\n", error); 543 goto fail; 544 } 545 546 wpi_sysctlattach(sc); 547 548 /* 549 * Hook our interrupt after all initialization is complete. 550 */ 551 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 552 NULL, wpi_intr, sc, &sc->sc_ih); 553 if (error != 0) { 554 device_printf(dev, "can't establish interrupt, error %d\n", 555 error); 556 goto fail; 557 } 558 559 if (bootverbose) 560 ieee80211_announce(ic); 561 562 #ifdef WPI_DEBUG 563 if (sc->sc_debug & WPI_DEBUG_HW) 564 ieee80211_announce_channels(ic); 565 #endif 566 567 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 568 return 0; 569 570 fail: wpi_detach(dev); 571 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 572 return error; 573 } 574 575 /* 576 * Attach the interface to 802.11 radiotap. 
577 */ 578 static void 579 wpi_radiotap_attach(struct wpi_softc *sc) 580 { 581 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 582 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 583 584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 585 ieee80211_radiotap_attach(&sc->sc_ic, 586 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 587 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 588 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 589 } 590 591 static void 592 wpi_sysctlattach(struct wpi_softc *sc) 593 { 594 #ifdef WPI_DEBUG 595 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 596 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 597 598 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 599 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 600 "control debugging printfs"); 601 #endif 602 } 603 604 static void 605 wpi_init_beacon(struct wpi_vap *wvp) 606 { 607 struct wpi_buf *bcn = &wvp->wv_bcbuf; 608 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 609 610 cmd->id = WPI_ID_BROADCAST; 611 cmd->ofdm_mask = 0xff; 612 cmd->cck_mask = 0x0f; 613 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 614 615 /* 616 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 617 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 618 */ 619 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 620 621 bcn->code = WPI_CMD_SET_BEACON; 622 bcn->ac = WPI_CMD_QUEUE_NUM; 623 bcn->size = sizeof(struct wpi_cmd_beacon); 624 } 625 626 static struct ieee80211vap * 627 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 628 enum ieee80211_opmode opmode, int flags, 629 const uint8_t bssid[IEEE80211_ADDR_LEN], 630 const uint8_t mac[IEEE80211_ADDR_LEN]) 631 { 632 struct wpi_vap *wvp; 633 struct ieee80211vap *vap; 634 635 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 636 return NULL; 637 638 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 639 vap = &wvp->wv_vap; 640 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 641 642 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 643 WPI_VAP_LOCK_INIT(wvp); 644 wpi_init_beacon(wvp); 645 } 646 647 /* Override with driver methods. */ 648 vap->iv_key_set = wpi_key_set; 649 vap->iv_key_delete = wpi_key_delete; 650 if (opmode == IEEE80211_M_IBSS) { 651 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 652 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; 653 } 654 wvp->wv_newstate = vap->iv_newstate; 655 vap->iv_newstate = wpi_newstate; 656 vap->iv_update_beacon = wpi_update_beacon; 657 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 658 659 ieee80211_ratectl_init(vap); 660 /* Complete setup. 
*/ 661 ieee80211_vap_attach(vap, ieee80211_media_change, 662 ieee80211_media_status, mac); 663 ic->ic_opmode = opmode; 664 return vap; 665 } 666 667 static void 668 wpi_vap_delete(struct ieee80211vap *vap) 669 { 670 struct wpi_vap *wvp = WPI_VAP(vap); 671 struct wpi_buf *bcn = &wvp->wv_bcbuf; 672 enum ieee80211_opmode opmode = vap->iv_opmode; 673 674 ieee80211_ratectl_deinit(vap); 675 ieee80211_vap_detach(vap); 676 677 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 678 if (bcn->m != NULL) 679 m_freem(bcn->m); 680 681 WPI_VAP_LOCK_DESTROY(wvp); 682 } 683 684 free(wvp, M_80211_VAP); 685 } 686 687 static int 688 wpi_detach(device_t dev) 689 { 690 struct wpi_softc *sc = device_get_softc(dev); 691 struct ieee80211com *ic = &sc->sc_ic; 692 uint8_t qid; 693 694 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 695 696 if (ic->ic_vap_create == wpi_vap_create) { 697 ieee80211_draintask(ic, &sc->sc_radioon_task); 698 699 wpi_stop(sc); 700 701 if (sc->sc_tq != NULL) { 702 taskqueue_drain_all(sc->sc_tq); 703 taskqueue_free(sc->sc_tq); 704 } 705 706 callout_drain(&sc->watchdog_rfkill); 707 callout_drain(&sc->tx_timeout); 708 callout_drain(&sc->scan_timeout); 709 callout_drain(&sc->calib_to); 710 ieee80211_ifdetach(ic); 711 } 712 713 /* Uninstall interrupt handler. */ 714 if (sc->irq != NULL) { 715 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 716 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 717 sc->irq); 718 pci_release_msi(dev); 719 } 720 721 if (sc->txq[0].data_dmat) { 722 /* Free DMA resources. */ 723 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 724 wpi_free_tx_ring(sc, &sc->txq[qid]); 725 726 wpi_free_rx_ring(sc); 727 wpi_free_shared(sc); 728 } 729 730 if (sc->fw_dma.tag) 731 wpi_free_fwmem(sc); 732 733 if (sc->mem != NULL) 734 bus_release_resource(dev, SYS_RES_MEMORY, 735 rman_get_rid(sc->mem), sc->mem); 736 737 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 738 WPI_TXQ_STATE_LOCK_DESTROY(sc); 739 WPI_TXQ_LOCK_DESTROY(sc); 740 WPI_NT_LOCK_DESTROY(sc); 741 WPI_RXON_LOCK_DESTROY(sc); 742 WPI_TX_LOCK_DESTROY(sc); 743 WPI_LOCK_DESTROY(sc); 744 return 0; 745 } 746 747 static int 748 wpi_shutdown(device_t dev) 749 { 750 struct wpi_softc *sc = device_get_softc(dev); 751 752 wpi_stop(sc); 753 return 0; 754 } 755 756 static int 757 wpi_suspend(device_t dev) 758 { 759 struct wpi_softc *sc = device_get_softc(dev); 760 struct ieee80211com *ic = &sc->sc_ic; 761 762 ieee80211_suspend_all(ic); 763 return 0; 764 } 765 766 static int 767 wpi_resume(device_t dev) 768 { 769 struct wpi_softc *sc = device_get_softc(dev); 770 struct ieee80211com *ic = &sc->sc_ic; 771 772 /* Clear device-specific "PCI retry timeout" register (41h). */ 773 pci_write_config(dev, 0x41, 0, 1); 774 775 ieee80211_resume_all(ic); 776 return 0; 777 } 778 779 /* 780 * Grab exclusive access to NIC memory. 781 */ 782 static int 783 wpi_nic_lock(struct wpi_softc *sc) 784 { 785 int ntries; 786 787 /* Request exclusive access to NIC. */ 788 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 789 790 /* Spin until we actually get the lock. */ 791 for (ntries = 0; ntries < 1000; ntries++) { 792 if ((WPI_READ(sc, WPI_GP_CNTRL) & 793 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 794 WPI_GP_CNTRL_MAC_ACCESS_ENA) 795 return 0; 796 DELAY(10); 797 } 798 799 device_printf(sc->sc_dev, "could not lock memory\n"); 800 801 return ETIMEDOUT; 802 } 803 804 /* 805 * Release lock on NIC memory. 
806 */ 807 static __inline void 808 wpi_nic_unlock(struct wpi_softc *sc) 809 { 810 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 811 } 812 813 static __inline uint32_t 814 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 815 { 816 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 817 WPI_BARRIER_READ_WRITE(sc); 818 return WPI_READ(sc, WPI_PRPH_RDATA); 819 } 820 821 static __inline void 822 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 823 { 824 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 825 WPI_BARRIER_WRITE(sc); 826 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 827 } 828 829 static __inline void 830 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 831 { 832 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 833 } 834 835 static __inline void 836 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 837 { 838 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 839 } 840 841 static __inline void 842 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 843 const uint32_t *data, uint32_t count) 844 { 845 for (; count != 0; count--, data++, addr += 4) 846 wpi_prph_write(sc, addr, *data); 847 } 848 849 static __inline uint32_t 850 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 851 { 852 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 853 WPI_BARRIER_READ_WRITE(sc); 854 return WPI_READ(sc, WPI_MEM_RDATA); 855 } 856 857 static __inline void 858 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 859 int count) 860 { 861 for (; count > 0; count--, addr += 4) 862 *data++ = wpi_mem_read(sc, addr); 863 } 864 865 static int 866 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 867 { 868 uint8_t *out = data; 869 uint32_t val; 870 int error, ntries; 871 872 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 873 874 if ((error = wpi_nic_lock(sc)) != 0) 875 return error; 876 877 for (; count > 0; count -= 2, addr++) { 878 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 879 for (ntries = 0; ntries < 10; ntries++) { 880 val = WPI_READ(sc, WPI_EEPROM); 881 if (val & WPI_EEPROM_READ_VALID) 882 break; 883 DELAY(5); 884 } 885 if (ntries == 10) { 886 device_printf(sc->sc_dev, 887 "timeout reading ROM at 0x%x\n", addr); 888 return ETIMEDOUT; 889 } 890 *out++= val >> 16; 891 if (count > 1) 892 *out ++= val >> 24; 893 } 894 895 wpi_nic_unlock(sc); 896 897 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 898 899 return 0; 900 } 901 902 static void 903 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 904 { 905 if (error != 0) 906 return; 907 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 908 *(bus_addr_t *)arg = segs[0].ds_addr; 909 } 910 911 /* 912 * Allocates a contiguous block of dma memory of the requested size and 913 * alignment. 
914 */ 915 static int 916 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 917 void **kvap, bus_size_t size, bus_size_t alignment) 918 { 919 int error; 920 921 dma->tag = NULL; 922 dma->size = size; 923 924 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 925 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 926 1, size, 0, NULL, NULL, &dma->tag); 927 if (error != 0) 928 goto fail; 929 930 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 931 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 932 if (error != 0) 933 goto fail; 934 935 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 936 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 937 if (error != 0) 938 goto fail; 939 940 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 941 942 if (kvap != NULL) 943 *kvap = dma->vaddr; 944 945 return 0; 946 947 fail: wpi_dma_contig_free(dma); 948 return error; 949 } 950 951 static void 952 wpi_dma_contig_free(struct wpi_dma_info *dma) 953 { 954 if (dma->vaddr != NULL) { 955 bus_dmamap_sync(dma->tag, dma->map, 956 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 957 bus_dmamap_unload(dma->tag, dma->map); 958 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 959 dma->vaddr = NULL; 960 } 961 if (dma->tag != NULL) { 962 bus_dma_tag_destroy(dma->tag); 963 dma->tag = NULL; 964 } 965 } 966 967 /* 968 * Allocate a shared page between host and NIC. 969 */ 970 static int 971 wpi_alloc_shared(struct wpi_softc *sc) 972 { 973 /* Shared buffer must be aligned on a 4KB boundary. */ 974 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 975 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 976 } 977 978 static void 979 wpi_free_shared(struct wpi_softc *sc) 980 { 981 wpi_dma_contig_free(&sc->shared_dma); 982 } 983 984 /* 985 * Allocate DMA-safe memory for firmware transfer. 986 */ 987 static int 988 wpi_alloc_fwmem(struct wpi_softc *sc) 989 { 990 /* Must be aligned on a 16-byte boundary. */ 991 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 992 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 993 } 994 995 static void 996 wpi_free_fwmem(struct wpi_softc *sc) 997 { 998 wpi_dma_contig_free(&sc->fw_dma); 999 } 1000 1001 static int 1002 wpi_alloc_rx_ring(struct wpi_softc *sc) 1003 { 1004 struct wpi_rx_ring *ring = &sc->rxq; 1005 bus_size_t size; 1006 int i, error; 1007 1008 ring->cur = 0; 1009 ring->update = 0; 1010 1011 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1012 1013 /* Allocate RX descriptors (16KB aligned.) */ 1014 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1015 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1016 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1017 if (error != 0) { 1018 device_printf(sc->sc_dev, 1019 "%s: could not allocate RX ring DMA memory, error %d\n", 1020 __func__, error); 1021 goto fail; 1022 } 1023 1024 /* Create RX buffer DMA tag. */ 1025 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1026 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1027 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat); 1028 if (error != 0) { 1029 device_printf(sc->sc_dev, 1030 "%s: could not create RX buf DMA tag, error %d\n", 1031 __func__, error); 1032 goto fail; 1033 } 1034 1035 /* 1036 * Allocate and map RX buffers. 
1037 */ 1038 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1039 struct wpi_rx_data *data = &ring->data[i]; 1040 bus_addr_t paddr; 1041 1042 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1043 if (error != 0) { 1044 device_printf(sc->sc_dev, 1045 "%s: could not create RX buf DMA map, error %d\n", 1046 __func__, error); 1047 goto fail; 1048 } 1049 1050 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1051 if (data->m == NULL) { 1052 device_printf(sc->sc_dev, 1053 "%s: could not allocate RX mbuf\n", __func__); 1054 error = ENOBUFS; 1055 goto fail; 1056 } 1057 1058 error = bus_dmamap_load(ring->data_dmat, data->map, 1059 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1060 &paddr, BUS_DMA_NOWAIT); 1061 if (error != 0 && error != EFBIG) { 1062 device_printf(sc->sc_dev, 1063 "%s: can't map mbuf (error %d)\n", __func__, 1064 error); 1065 goto fail; 1066 } 1067 1068 /* Set physical address of RX buffer. */ 1069 ring->desc[i] = htole32(paddr); 1070 } 1071 1072 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1073 BUS_DMASYNC_PREWRITE); 1074 1075 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1076 1077 return 0; 1078 1079 fail: wpi_free_rx_ring(sc); 1080 1081 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1082 1083 return error; 1084 } 1085 1086 static void 1087 wpi_update_rx_ring(struct wpi_softc *sc) 1088 { 1089 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1090 } 1091 1092 static void 1093 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1094 { 1095 struct wpi_rx_ring *ring = &sc->rxq; 1096 1097 if (ring->update != 0) { 1098 /* Wait for INT_WAKEUP event. */ 1099 return; 1100 } 1101 1102 WPI_TXQ_LOCK(sc); 1103 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1104 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1105 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1106 __func__); 1107 ring->update = 1; 1108 } else { 1109 wpi_update_rx_ring(sc); 1110 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1111 } 1112 WPI_TXQ_UNLOCK(sc); 1113 } 1114 1115 static void 1116 wpi_reset_rx_ring(struct wpi_softc *sc) 1117 { 1118 struct wpi_rx_ring *ring = &sc->rxq; 1119 int ntries; 1120 1121 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1122 1123 if (wpi_nic_lock(sc) == 0) { 1124 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1125 for (ntries = 0; ntries < 1000; ntries++) { 1126 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1127 WPI_FH_RX_STATUS_IDLE) 1128 break; 1129 DELAY(10); 1130 } 1131 wpi_nic_unlock(sc); 1132 } 1133 1134 ring->cur = 0; 1135 ring->update = 0; 1136 } 1137 1138 static void 1139 wpi_free_rx_ring(struct wpi_softc *sc) 1140 { 1141 struct wpi_rx_ring *ring = &sc->rxq; 1142 int i; 1143 1144 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1145 1146 wpi_dma_contig_free(&ring->desc_dma); 1147 1148 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1149 struct wpi_rx_data *data = &ring->data[i]; 1150 1151 if (data->m != NULL) { 1152 bus_dmamap_sync(ring->data_dmat, data->map, 1153 BUS_DMASYNC_POSTREAD); 1154 bus_dmamap_unload(ring->data_dmat, data->map); 1155 m_freem(data->m); 1156 data->m = NULL; 1157 } 1158 if (data->map != NULL) 1159 bus_dmamap_destroy(ring->data_dmat, data->map); 1160 } 1161 if (ring->data_dmat != NULL) { 1162 bus_dma_tag_destroy(ring->data_dmat); 1163 ring->data_dmat = NULL; 1164 } 1165 } 1166 1167 static int 1168 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) 1169 { 1170 bus_addr_t paddr; 1171 bus_size_t size; 1172 int i, error; 1173 1174 ring->qid = qid; 1175 
ring->queued = 0; 1176 ring->cur = 0; 1177 ring->pending = 0; 1178 ring->update = 0; 1179 1180 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1181 1182 /* Allocate TX descriptors (16KB aligned.) */ 1183 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1184 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1185 size, WPI_RING_DMA_ALIGN); 1186 if (error != 0) { 1187 device_printf(sc->sc_dev, 1188 "%s: could not allocate TX ring DMA memory, error %d\n", 1189 __func__, error); 1190 goto fail; 1191 } 1192 1193 /* Update shared area with ring physical address. */ 1194 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1195 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1196 BUS_DMASYNC_PREWRITE); 1197 1198 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1199 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1200 size, 4); 1201 if (error != 0) { 1202 device_printf(sc->sc_dev, 1203 "%s: could not allocate TX cmd DMA memory, error %d\n", 1204 __func__, error); 1205 goto fail; 1206 } 1207 1208 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1209 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1210 WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); 1211 if (error != 0) { 1212 device_printf(sc->sc_dev, 1213 "%s: could not create TX buf DMA tag, error %d\n", 1214 __func__, error); 1215 goto fail; 1216 } 1217 1218 paddr = ring->cmd_dma.paddr; 1219 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1220 struct wpi_tx_data *data = &ring->data[i]; 1221 1222 data->cmd_paddr = paddr; 1223 paddr += sizeof (struct wpi_tx_cmd); 1224 1225 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1226 if (error != 0) { 1227 device_printf(sc->sc_dev, 1228 "%s: could not create TX buf DMA map, error %d\n", 1229 __func__, error); 1230 goto fail; 1231 } 1232 } 1233 1234 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1235 1236 return 0; 1237 1238 fail: wpi_free_tx_ring(sc, ring); 1239 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1240 return error; 1241 } 1242 1243 static void 1244 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1245 { 1246 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1247 } 1248 1249 static void 1250 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1251 { 1252 1253 if (ring->update != 0) { 1254 /* Wait for INT_WAKEUP event. */ 1255 return; 1256 } 1257 1258 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1259 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1260 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1261 __func__, ring->qid); 1262 ring->update = 1; 1263 } else { 1264 wpi_update_tx_ring(sc, ring); 1265 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1266 } 1267 } 1268 1269 static void 1270 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1271 { 1272 int i; 1273 1274 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1275 1276 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1277 struct wpi_tx_data *data = &ring->data[i]; 1278 1279 if (data->m != NULL) { 1280 bus_dmamap_sync(ring->data_dmat, data->map, 1281 BUS_DMASYNC_POSTWRITE); 1282 bus_dmamap_unload(ring->data_dmat, data->map); 1283 m_freem(data->m); 1284 data->m = NULL; 1285 } 1286 if (data->ni != NULL) { 1287 ieee80211_free_node(data->ni); 1288 data->ni = NULL; 1289 } 1290 } 1291 /* Clear TX descriptors. 
*/ 1292 memset(ring->desc, 0, ring->desc_dma.size); 1293 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1294 BUS_DMASYNC_PREWRITE); 1295 ring->queued = 0; 1296 ring->cur = 0; 1297 ring->pending = 0; 1298 ring->update = 0; 1299 } 1300 1301 static void 1302 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1303 { 1304 int i; 1305 1306 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1307 1308 wpi_dma_contig_free(&ring->desc_dma); 1309 wpi_dma_contig_free(&ring->cmd_dma); 1310 1311 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1312 struct wpi_tx_data *data = &ring->data[i]; 1313 1314 if (data->m != NULL) { 1315 bus_dmamap_sync(ring->data_dmat, data->map, 1316 BUS_DMASYNC_POSTWRITE); 1317 bus_dmamap_unload(ring->data_dmat, data->map); 1318 m_freem(data->m); 1319 } 1320 if (data->map != NULL) 1321 bus_dmamap_destroy(ring->data_dmat, data->map); 1322 } 1323 if (ring->data_dmat != NULL) { 1324 bus_dma_tag_destroy(ring->data_dmat); 1325 ring->data_dmat = NULL; 1326 } 1327 } 1328 1329 /* 1330 * Extract various information from EEPROM. 1331 */ 1332 static int 1333 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1334 { 1335 #define WPI_CHK(res) do { \ 1336 if ((error = res) != 0) \ 1337 goto fail; \ 1338 } while (0) 1339 uint8_t i; 1340 int error; 1341 1342 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1343 1344 /* Adapter has to be powered on for EEPROM access to work. */ 1345 if ((error = wpi_apm_init(sc)) != 0) { 1346 device_printf(sc->sc_dev, 1347 "%s: could not power ON adapter, error %d\n", __func__, 1348 error); 1349 return error; 1350 } 1351 1352 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1353 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1354 error = EIO; 1355 goto fail; 1356 } 1357 /* Clear HW ownership of EEPROM. */ 1358 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1359 1360 /* Read the hardware capabilities, revision and SKU type. */ 1361 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1362 sizeof(sc->cap))); 1363 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1364 sizeof(sc->rev))); 1365 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1366 sizeof(sc->type))); 1367 1368 sc->rev = le16toh(sc->rev); 1369 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1370 sc->rev, sc->type); 1371 1372 /* Read the regulatory domain (4 ASCII characters.) */ 1373 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1374 sizeof(sc->domain))); 1375 1376 /* Read MAC address. */ 1377 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1378 IEEE80211_ADDR_LEN)); 1379 1380 /* Read the list of authorized channels. */ 1381 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1382 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1383 1384 /* Read the list of TX power groups. */ 1385 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1386 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1387 1388 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1389 1390 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1391 __func__); 1392 1393 return error; 1394 #undef WPI_CHK 1395 } 1396 1397 /* 1398 * Translate EEPROM flags to net80211. 
1399 */ 1400 static uint32_t 1401 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1402 { 1403 uint32_t nflags; 1404 1405 nflags = 0; 1406 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1407 nflags |= IEEE80211_CHAN_PASSIVE; 1408 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1409 nflags |= IEEE80211_CHAN_NOADHOC; 1410 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1411 nflags |= IEEE80211_CHAN_DFS; 1412 /* XXX apparently IBSS may still be marked */ 1413 nflags |= IEEE80211_CHAN_NOADHOC; 1414 } 1415 1416 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1417 if (nflags & IEEE80211_CHAN_NOADHOC) 1418 nflags |= IEEE80211_CHAN_NOHOSTAP; 1419 1420 return nflags; 1421 } 1422 1423 static void 1424 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n, int maxchans, 1425 int *nchans, struct ieee80211_channel chans[]) 1426 { 1427 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1428 const struct wpi_chan_band *band = &wpi_bands[n]; 1429 struct ieee80211_channel *c; 1430 uint32_t nflags; 1431 uint8_t chan, i; 1432 1433 for (i = 0; i < band->nchan; i++) { 1434 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1435 DPRINTF(sc, WPI_DEBUG_EEPROM, 1436 "Channel Not Valid: %d, band %d\n", 1437 band->chan[i],n); 1438 continue; 1439 } 1440 1441 if (*nchans >= maxchans) 1442 break; 1443 1444 chan = band->chan[i]; 1445 nflags = wpi_eeprom_channel_flags(&channels[i]); 1446 1447 c = &chans[(*nchans)++]; 1448 c->ic_ieee = chan; 1449 c->ic_maxregpower = channels[i].maxpwr; 1450 c->ic_maxpower = 2*c->ic_maxregpower; 1451 1452 if (n == 0) { /* 2GHz band */ 1453 c->ic_freq = ieee80211_ieee2mhz(chan, 1454 IEEE80211_CHAN_G); 1455 1456 /* G =>'s B is supported */ 1457 c->ic_flags = IEEE80211_CHAN_B | nflags; 1458 1459 if (*nchans >= maxchans) 1460 break; 1461 1462 c = &chans[(*nchans)++]; 1463 c[0] = c[-1]; 1464 c->ic_flags = IEEE80211_CHAN_G | nflags; 1465 } else { /* 5GHz band */ 1466 c->ic_freq = ieee80211_ieee2mhz(chan, 1467 IEEE80211_CHAN_A); 1468 1469 c->ic_flags = IEEE80211_CHAN_A | nflags; 1470 } 1471 1472 /* Save maximum allowed TX power for this channel. */ 1473 sc->maxpwr[chan] = channels[i].maxpwr; 1474 1475 DPRINTF(sc, WPI_DEBUG_EEPROM, 1476 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1477 " offset %d\n", chan, c->ic_freq, 1478 channels[i].flags, sc->maxpwr[chan], 1479 IEEE80211_IS_CHAN_PASSIVE(c), *nchans); 1480 } 1481 } 1482 1483 /** 1484 * Read the eeprom to find out what channels are valid for the given 1485 * band and update net80211 with what we find. 
1486 */ 1487 static int 1488 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) 1489 { 1490 struct ieee80211com *ic = &sc->sc_ic; 1491 const struct wpi_chan_band *band = &wpi_bands[n]; 1492 int error; 1493 1494 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1495 1496 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1497 band->nchan * sizeof (struct wpi_eeprom_chan)); 1498 if (error != 0) { 1499 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1500 return error; 1501 } 1502 1503 wpi_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, 1504 ic->ic_channels); 1505 1506 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1507 1508 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1509 1510 return 0; 1511 } 1512 1513 static struct wpi_eeprom_chan * 1514 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1515 { 1516 int i, j; 1517 1518 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1519 for (i = 0; i < wpi_bands[j].nchan; i++) 1520 if (wpi_bands[j].chan[i] == c->ic_ieee) 1521 return &sc->eeprom_channels[j][i]; 1522 1523 return NULL; 1524 } 1525 1526 static void 1527 wpi_getradiocaps(struct ieee80211com *ic, 1528 int maxchans, int *nchans, struct ieee80211_channel chans[]) 1529 { 1530 struct wpi_softc *sc = ic->ic_softc; 1531 int i; 1532 1533 /* Parse the list of authorized channels. */ 1534 for (i = 0; i < WPI_CHAN_BANDS_COUNT && *nchans < maxchans; i++) 1535 wpi_read_eeprom_band(sc, i, maxchans, nchans, chans); 1536 } 1537 1538 /* 1539 * Enforce flags read from EEPROM. 1540 */ 1541 static int 1542 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1543 int nchan, struct ieee80211_channel chans[]) 1544 { 1545 struct wpi_softc *sc = ic->ic_softc; 1546 int i; 1547 1548 for (i = 0; i < nchan; i++) { 1549 struct ieee80211_channel *c = &chans[i]; 1550 struct wpi_eeprom_chan *channel; 1551 1552 channel = wpi_find_eeprom_channel(sc, c); 1553 if (channel == NULL) { 1554 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1555 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1556 return EINVAL; 1557 } 1558 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1559 } 1560 1561 return 0; 1562 } 1563 1564 static int 1565 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) 1566 { 1567 struct wpi_power_group *group = &sc->groups[n]; 1568 struct wpi_eeprom_group rgroup; 1569 int i, error; 1570 1571 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1572 1573 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1574 &rgroup, sizeof rgroup)) != 0) { 1575 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1576 return error; 1577 } 1578 1579 /* Save TX power group information. */ 1580 group->chan = rgroup.chan; 1581 group->maxpwr = rgroup.maxpwr; 1582 /* Retrieve temperature at which the samples were taken. 
*/ 1583 group->temp = (int16_t)le16toh(rgroup.temp); 1584 1585 DPRINTF(sc, WPI_DEBUG_EEPROM, 1586 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1587 group->maxpwr, group->temp); 1588 1589 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1590 group->samples[i].index = rgroup.samples[i].index; 1591 group->samples[i].power = rgroup.samples[i].power; 1592 1593 DPRINTF(sc, WPI_DEBUG_EEPROM, 1594 "\tsample %d: index=%d power=%d\n", i, 1595 group->samples[i].index, group->samples[i].power); 1596 } 1597 1598 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1599 1600 return 0; 1601 } 1602 1603 static __inline uint8_t 1604 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1605 { 1606 uint8_t newid = WPI_ID_IBSS_MIN; 1607 1608 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1609 if ((sc->nodesmsk & (1 << newid)) == 0) { 1610 sc->nodesmsk |= 1 << newid; 1611 return newid; 1612 } 1613 } 1614 1615 return WPI_ID_UNDEFINED; 1616 } 1617 1618 static __inline uint8_t 1619 wpi_add_node_entry_sta(struct wpi_softc *sc) 1620 { 1621 sc->nodesmsk |= 1 << WPI_ID_BSS; 1622 1623 return WPI_ID_BSS; 1624 } 1625 1626 static __inline int 1627 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1628 { 1629 if (id == WPI_ID_UNDEFINED) 1630 return 0; 1631 1632 return (sc->nodesmsk >> id) & 1; 1633 } 1634 1635 static __inline void 1636 wpi_clear_node_table(struct wpi_softc *sc) 1637 { 1638 sc->nodesmsk = 0; 1639 } 1640 1641 static __inline void 1642 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1643 { 1644 sc->nodesmsk &= ~(1 << id); 1645 } 1646 1647 static struct ieee80211_node * 1648 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1649 { 1650 struct wpi_node *wn; 1651 1652 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1653 M_NOWAIT | M_ZERO); 1654 1655 if (wn == NULL) 1656 return NULL; 1657 1658 wn->id = WPI_ID_UNDEFINED; 1659 1660 return &wn->ni; 1661 } 1662 1663 static void 1664 wpi_node_free(struct ieee80211_node *ni) 1665 { 1666 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1667 struct wpi_node *wn = WPI_NODE(ni); 1668 1669 if (wn->id != WPI_ID_UNDEFINED) { 1670 WPI_NT_LOCK(sc); 1671 if (wpi_check_node_entry(sc, wn->id)) { 1672 wpi_del_node_entry(sc, wn->id); 1673 wpi_del_node(sc, ni); 1674 } 1675 WPI_NT_UNLOCK(sc); 1676 } 1677 1678 sc->sc_node_free(ni); 1679 } 1680 1681 static __inline int 1682 wpi_check_bss_filter(struct wpi_softc *sc) 1683 { 1684 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1685 } 1686 1687 static void 1688 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1689 const struct ieee80211_rx_stats *rxs, 1690 int rssi, int nf) 1691 { 1692 struct ieee80211vap *vap = ni->ni_vap; 1693 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1694 struct wpi_vap *wvp = WPI_VAP(vap); 1695 uint64_t ni_tstamp, rx_tstamp; 1696 1697 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1698 1699 if (vap->iv_state == IEEE80211_S_RUN && 1700 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1701 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1702 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1703 rx_tstamp = le64toh(sc->rx_tstamp); 1704 1705 if (ni_tstamp >= rx_tstamp) { 1706 DPRINTF(sc, WPI_DEBUG_STATE, 1707 "ibss merge, tsf %ju tstamp %ju\n", 1708 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1709 (void) ieee80211_ibss_merge(ni); 1710 } 1711 } 1712 } 1713 1714 static void 1715 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1716 { 1717 struct wpi_softc *sc = arg; 1718 struct wpi_node *wn = WPI_NODE(ni); 1719 int error; 1720 1721 WPI_NT_LOCK(sc); 
1722 if (wn->id != WPI_ID_UNDEFINED) { 1723 wn->id = WPI_ID_UNDEFINED; 1724 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1725 device_printf(sc->sc_dev, 1726 "%s: could not add IBSS node, error %d\n", 1727 __func__, error); 1728 } 1729 } 1730 WPI_NT_UNLOCK(sc); 1731 } 1732 1733 static void 1734 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1735 { 1736 struct ieee80211com *ic = &sc->sc_ic; 1737 1738 /* Set group keys once. */ 1739 WPI_NT_LOCK(sc); 1740 wvp->wv_gtk = 0; 1741 WPI_NT_UNLOCK(sc); 1742 1743 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1744 ieee80211_crypto_reload_keys(ic); 1745 } 1746 1747 /** 1748 * Called by net80211 when ever there is a change to 80211 state machine 1749 */ 1750 static int 1751 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1752 { 1753 struct wpi_vap *wvp = WPI_VAP(vap); 1754 struct ieee80211com *ic = vap->iv_ic; 1755 struct wpi_softc *sc = ic->ic_softc; 1756 int error = 0; 1757 1758 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1759 1760 WPI_TXQ_LOCK(sc); 1761 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1762 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1763 WPI_TXQ_UNLOCK(sc); 1764 1765 return ENXIO; 1766 } 1767 WPI_TXQ_UNLOCK(sc); 1768 1769 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1770 ieee80211_state_name[vap->iv_state], 1771 ieee80211_state_name[nstate]); 1772 1773 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1774 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1775 device_printf(sc->sc_dev, 1776 "%s: could not set power saving level\n", 1777 __func__); 1778 return error; 1779 } 1780 1781 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1782 } 1783 1784 switch (nstate) { 1785 case IEEE80211_S_SCAN: 1786 WPI_RXON_LOCK(sc); 1787 if (wpi_check_bss_filter(sc) != 0) { 1788 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1789 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1790 device_printf(sc->sc_dev, 1791 "%s: could not send RXON\n", __func__); 1792 } 1793 } 1794 WPI_RXON_UNLOCK(sc); 1795 break; 1796 1797 case IEEE80211_S_ASSOC: 1798 if (vap->iv_state != IEEE80211_S_RUN) 1799 break; 1800 /* FALLTHROUGH */ 1801 case IEEE80211_S_AUTH: 1802 /* 1803 * NB: do not optimize AUTH -> AUTH state transmission - 1804 * this will break powersave with non-QoS AP! 1805 */ 1806 1807 /* 1808 * The node must be registered in the firmware before auth. 1809 * Also the associd must be cleared on RUN -> ASSOC 1810 * transitions. 1811 */ 1812 if ((error = wpi_auth(sc, vap)) != 0) { 1813 device_printf(sc->sc_dev, 1814 "%s: could not move to AUTH state, error %d\n", 1815 __func__, error); 1816 } 1817 break; 1818 1819 case IEEE80211_S_RUN: 1820 /* 1821 * RUN -> RUN transition: 1822 * STA mode: Just restart the timers. 1823 * IBSS mode: Process IBSS merge. 1824 */ 1825 if (vap->iv_state == IEEE80211_S_RUN) { 1826 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1827 WPI_RXON_LOCK(sc); 1828 wpi_calib_timeout(sc); 1829 WPI_RXON_UNLOCK(sc); 1830 break; 1831 } else { 1832 /* 1833 * Drop the BSS_FILTER bit 1834 * (there is no another way to change bssid). 1835 */ 1836 WPI_RXON_LOCK(sc); 1837 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1838 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1839 device_printf(sc->sc_dev, 1840 "%s: could not send RXON\n", 1841 __func__); 1842 } 1843 WPI_RXON_UNLOCK(sc); 1844 1845 /* Restore all what was lost. */ 1846 wpi_restore_node_table(sc, wvp); 1847 1848 /* XXX set conditionally? 
*/ 1849 wpi_updateedca(ic); 1850 } 1851 } 1852 1853 /* 1854 * !RUN -> RUN requires setting the association id 1855 * which is done with a firmware cmd. We also defer 1856 * starting the timers until that work is done. 1857 */ 1858 if ((error = wpi_run(sc, vap)) != 0) { 1859 device_printf(sc->sc_dev, 1860 "%s: could not move to RUN state\n", __func__); 1861 } 1862 break; 1863 1864 default: 1865 break; 1866 } 1867 if (error != 0) { 1868 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1869 return error; 1870 } 1871 1872 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1873 1874 return wvp->wv_newstate(vap, nstate, arg); 1875 } 1876 1877 static void 1878 wpi_calib_timeout(void *arg) 1879 { 1880 struct wpi_softc *sc = arg; 1881 1882 if (wpi_check_bss_filter(sc) == 0) 1883 return; 1884 1885 wpi_power_calibration(sc); 1886 1887 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1888 } 1889 1890 static __inline uint8_t 1891 rate2plcp(const uint8_t rate) 1892 { 1893 switch (rate) { 1894 case 12: return 0xd; 1895 case 18: return 0xf; 1896 case 24: return 0x5; 1897 case 36: return 0x7; 1898 case 48: return 0x9; 1899 case 72: return 0xb; 1900 case 96: return 0x1; 1901 case 108: return 0x3; 1902 case 2: return 10; 1903 case 4: return 20; 1904 case 11: return 55; 1905 case 22: return 110; 1906 default: return 0; 1907 } 1908 } 1909 1910 static __inline uint8_t 1911 plcp2rate(const uint8_t plcp) 1912 { 1913 switch (plcp) { 1914 case 0xd: return 12; 1915 case 0xf: return 18; 1916 case 0x5: return 24; 1917 case 0x7: return 36; 1918 case 0x9: return 48; 1919 case 0xb: return 72; 1920 case 0x1: return 96; 1921 case 0x3: return 108; 1922 case 10: return 2; 1923 case 20: return 4; 1924 case 55: return 11; 1925 case 110: return 22; 1926 default: return 0; 1927 } 1928 } 1929 1930 /* Quickly determine if a given rate is CCK or OFDM. */ 1931 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1932 1933 static void 1934 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1935 struct wpi_rx_data *data) 1936 { 1937 struct ieee80211com *ic = &sc->sc_ic; 1938 struct wpi_rx_ring *ring = &sc->rxq; 1939 struct wpi_rx_stat *stat; 1940 struct wpi_rx_head *head; 1941 struct wpi_rx_tail *tail; 1942 struct ieee80211_frame *wh; 1943 struct ieee80211_node *ni; 1944 struct mbuf *m, *m1; 1945 bus_addr_t paddr; 1946 uint32_t flags; 1947 uint16_t len; 1948 int error; 1949 1950 stat = (struct wpi_rx_stat *)(desc + 1); 1951 1952 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1953 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1954 goto fail1; 1955 } 1956 1957 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1958 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1959 len = le16toh(head->len); 1960 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1961 flags = le32toh(tail->flags); 1962 1963 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1964 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1965 le32toh(desc->len), len, (int8_t)stat->rssi, 1966 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1967 1968 /* Discard frames with a bad FCS early. */ 1969 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1970 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1971 __func__, flags); 1972 goto fail1; 1973 } 1974 /* Discard frames that are too short. 
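* An ACK is the smallest valid 802.11 frame; anything shorter is garbage.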
*/ 1975 if (len < sizeof (struct ieee80211_frame_ack)) { 1976 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1977 __func__, len); 1978 goto fail1; 1979 } 1980 1981 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1982 if (__predict_false(m1 == NULL)) { 1983 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1984 __func__); 1985 goto fail1; 1986 } 1987 bus_dmamap_unload(ring->data_dmat, data->map); 1988 1989 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1990 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1991 if (__predict_false(error != 0 && error != EFBIG)) { 1992 device_printf(sc->sc_dev, 1993 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1994 m_freem(m1); 1995 1996 /* Try to reload the old mbuf. */ 1997 error = bus_dmamap_load(ring->data_dmat, data->map, 1998 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1999 &paddr, BUS_DMA_NOWAIT); 2000 if (error != 0 && error != EFBIG) { 2001 panic("%s: could not load old RX mbuf", __func__); 2002 } 2003 /* Physical address may have changed. */ 2004 ring->desc[ring->cur] = htole32(paddr); 2005 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2006 BUS_DMASYNC_PREWRITE); 2007 goto fail1; 2008 } 2009 2010 m = data->m; 2011 data->m = m1; 2012 /* Update RX descriptor. */ 2013 ring->desc[ring->cur] = htole32(paddr); 2014 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2015 BUS_DMASYNC_PREWRITE); 2016 2017 /* Finalize mbuf. */ 2018 m->m_data = (caddr_t)(head + 1); 2019 m->m_pkthdr.len = m->m_len = len; 2020 2021 /* Grab a reference to the source node. */ 2022 wh = mtod(m, struct ieee80211_frame *); 2023 2024 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2025 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2026 /* Check whether decryption was successful or not. */ 2027 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2028 DPRINTF(sc, WPI_DEBUG_RECV, 2029 "CCMP decryption failed 0x%x\n", flags); 2030 goto fail2; 2031 } 2032 m->m_flags |= M_WEP; 2033 } 2034 2035 if (len >= sizeof(struct ieee80211_frame_min)) 2036 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2037 else 2038 ni = NULL; 2039 2040 sc->rx_tstamp = tail->tstamp; 2041 2042 if (ieee80211_radiotap_active(ic)) { 2043 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2044 2045 tap->wr_flags = 0; 2046 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2047 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2048 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2049 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2050 tap->wr_tsft = tail->tstamp; 2051 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2052 tap->wr_rate = plcp2rate(head->plcp); 2053 } 2054 2055 WPI_UNLOCK(sc); 2056 2057 /* Send the frame to the 802.11 layer. */ 2058 if (ni != NULL) { 2059 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2060 /* Node is no longer needed. 
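* Release the reference taken by ieee80211_find_rxnode() above.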
*/ 2061 ieee80211_free_node(ni); 2062 } else 2063 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2064 2065 WPI_LOCK(sc); 2066 2067 return; 2068 2069 fail2: m_freem(m); 2070 2071 fail1: counter_u64_add(ic->ic_ierrors, 1); 2072 } 2073 2074 static void 2075 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2076 struct wpi_rx_data *data) 2077 { 2078 /* Ignore */ 2079 } 2080 2081 static void 2082 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2083 { 2084 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2085 struct wpi_tx_data *data = &ring->data[desc->idx]; 2086 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2087 struct mbuf *m; 2088 struct ieee80211_node *ni; 2089 struct ieee80211vap *vap; 2090 struct ieee80211com *ic; 2091 uint32_t status = le32toh(stat->status); 2092 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2093 2094 KASSERT(data->ni != NULL, ("no node")); 2095 KASSERT(data->m != NULL, ("no mbuf")); 2096 2097 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2098 2099 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2100 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2101 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2102 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2103 2104 /* Unmap and free mbuf. */ 2105 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2106 bus_dmamap_unload(ring->data_dmat, data->map); 2107 m = data->m, data->m = NULL; 2108 ni = data->ni, data->ni = NULL; 2109 vap = ni->ni_vap; 2110 ic = vap->iv_ic; 2111 2112 /* 2113 * Update rate control statistics for the node. 2114 */ 2115 if (status & WPI_TX_STATUS_FAIL) { 2116 ieee80211_ratectl_tx_complete(vap, ni, 2117 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2118 } else 2119 ieee80211_ratectl_tx_complete(vap, ni, 2120 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2121 2122 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2123 2124 WPI_TXQ_STATE_LOCK(sc); 2125 if (--ring->queued > 0) 2126 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2127 else 2128 callout_stop(&sc->tx_timeout); 2129 WPI_TXQ_STATE_UNLOCK(sc); 2130 2131 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2132 } 2133 2134 /* 2135 * Process a "command done" firmware notification. This is where we wakeup 2136 * processes waiting for a synchronous command completion. 2137 */ 2138 static void 2139 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2140 { 2141 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2142 struct wpi_tx_data *data; 2143 struct wpi_tx_cmd *cmd; 2144 2145 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2146 "type %s len %d\n", desc->qid, desc->idx, 2147 desc->flags, wpi_cmd_str(desc->type), 2148 le32toh(desc->len)); 2149 2150 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2151 return; /* Not a command ack. */ 2152 2153 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2154 2155 data = &ring->data[desc->idx]; 2156 cmd = &ring->cmd[desc->idx]; 2157 2158 /* If the command was mapped in an mbuf, free it. 
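* (wpi_cmd() uses a jumbo cluster for commands that do not fit in the per-slot buffer).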
*/ 2159 if (data->m != NULL) { 2160 bus_dmamap_sync(ring->data_dmat, data->map, 2161 BUS_DMASYNC_POSTWRITE); 2162 bus_dmamap_unload(ring->data_dmat, data->map); 2163 m_freem(data->m); 2164 data->m = NULL; 2165 } 2166 2167 wakeup(cmd); 2168 2169 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2170 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2171 2172 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2173 BUS_DMASYNC_POSTREAD); 2174 2175 WPI_TXQ_LOCK(sc); 2176 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2177 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2178 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2179 } else { 2180 sc->sc_update_rx_ring = wpi_update_rx_ring; 2181 sc->sc_update_tx_ring = wpi_update_tx_ring; 2182 } 2183 WPI_TXQ_UNLOCK(sc); 2184 } 2185 } 2186 2187 static void 2188 wpi_notif_intr(struct wpi_softc *sc) 2189 { 2190 struct ieee80211com *ic = &sc->sc_ic; 2191 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2192 uint32_t hw; 2193 2194 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2195 BUS_DMASYNC_POSTREAD); 2196 2197 hw = le32toh(sc->shared->next) & 0xfff; 2198 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2199 2200 while (sc->rxq.cur != hw) { 2201 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2202 2203 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2204 struct wpi_rx_desc *desc; 2205 2206 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2207 BUS_DMASYNC_POSTREAD); 2208 desc = mtod(data->m, struct wpi_rx_desc *); 2209 2210 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2211 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2212 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2213 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2214 2215 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2216 /* Reply to a command. */ 2217 wpi_cmd_done(sc, desc); 2218 } 2219 2220 switch (desc->type) { 2221 case WPI_RX_DONE: 2222 /* An 802.11 frame has been received. */ 2223 wpi_rx_done(sc, desc, data); 2224 2225 if (__predict_false(sc->sc_running == 0)) { 2226 /* wpi_stop() was called. */ 2227 return; 2228 } 2229 2230 break; 2231 2232 case WPI_TX_DONE: 2233 /* An 802.11 frame has been transmitted. 
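* wpi_tx_done() updates the rate control statistics and releases the mbuf and node reference.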
*/ 2234 wpi_tx_done(sc, desc); 2235 break; 2236 2237 case WPI_RX_STATISTICS: 2238 case WPI_BEACON_STATISTICS: 2239 wpi_rx_statistics(sc, desc, data); 2240 break; 2241 2242 case WPI_BEACON_MISSED: 2243 { 2244 struct wpi_beacon_missed *miss = 2245 (struct wpi_beacon_missed *)(desc + 1); 2246 uint32_t expected, misses, received, threshold; 2247 2248 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2249 BUS_DMASYNC_POSTREAD); 2250 2251 misses = le32toh(miss->consecutive); 2252 expected = le32toh(miss->expected); 2253 received = le32toh(miss->received); 2254 threshold = MAX(2, vap->iv_bmissthreshold); 2255 2256 DPRINTF(sc, WPI_DEBUG_BMISS, 2257 "%s: beacons missed %u(%u) (received %u/%u)\n", 2258 __func__, misses, le32toh(miss->total), received, 2259 expected); 2260 2261 if (misses >= threshold || 2262 (received == 0 && expected >= threshold)) { 2263 WPI_RXON_LOCK(sc); 2264 if (callout_pending(&sc->scan_timeout)) { 2265 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2266 0, 1); 2267 } 2268 WPI_RXON_UNLOCK(sc); 2269 if (vap->iv_state == IEEE80211_S_RUN && 2270 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2271 ieee80211_beacon_miss(ic); 2272 } 2273 2274 break; 2275 } 2276 #ifdef WPI_DEBUG 2277 case WPI_BEACON_SENT: 2278 { 2279 struct wpi_tx_stat *stat = 2280 (struct wpi_tx_stat *)(desc + 1); 2281 uint64_t *tsf = (uint64_t *)(stat + 1); 2282 uint32_t *mode = (uint32_t *)(tsf + 1); 2283 2284 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2285 BUS_DMASYNC_POSTREAD); 2286 2287 DPRINTF(sc, WPI_DEBUG_BEACON, 2288 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2289 "duration %u, status %x, tsf %ju, mode %x\n", 2290 stat->rtsfailcnt, stat->ackfailcnt, 2291 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2292 le32toh(stat->status), le64toh(*tsf), 2293 le32toh(*mode)); 2294 2295 break; 2296 } 2297 #endif 2298 case WPI_UC_READY: 2299 { 2300 struct wpi_ucode_info *uc = 2301 (struct wpi_ucode_info *)(desc + 1); 2302 2303 /* The microcontroller is ready. */ 2304 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2305 BUS_DMASYNC_POSTREAD); 2306 DPRINTF(sc, WPI_DEBUG_RESET, 2307 "microcode alive notification version=%d.%d " 2308 "subtype=%x alive=%x\n", uc->major, uc->minor, 2309 uc->subtype, le32toh(uc->valid)); 2310 2311 if (le32toh(uc->valid) != 1) { 2312 device_printf(sc->sc_dev, 2313 "microcontroller initialization failed\n"); 2314 wpi_stop_locked(sc); 2315 return; 2316 } 2317 /* Save the address of the error log in SRAM. 
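* wpi_fatal_intr() reads the log from this address when the firmware reports an error.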
*/ 2318 sc->errptr = le32toh(uc->errptr); 2319 break; 2320 } 2321 case WPI_STATE_CHANGED: 2322 { 2323 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2324 BUS_DMASYNC_POSTREAD); 2325 2326 uint32_t *status = (uint32_t *)(desc + 1); 2327 2328 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2329 le32toh(*status)); 2330 2331 if (le32toh(*status) & 1) { 2332 WPI_NT_LOCK(sc); 2333 wpi_clear_node_table(sc); 2334 WPI_NT_UNLOCK(sc); 2335 taskqueue_enqueue(sc->sc_tq, 2336 &sc->sc_radiooff_task); 2337 return; 2338 } 2339 break; 2340 } 2341 #ifdef WPI_DEBUG 2342 case WPI_START_SCAN: 2343 { 2344 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2345 BUS_DMASYNC_POSTREAD); 2346 2347 struct wpi_start_scan *scan = 2348 (struct wpi_start_scan *)(desc + 1); 2349 DPRINTF(sc, WPI_DEBUG_SCAN, 2350 "%s: scanning channel %d status %x\n", 2351 __func__, scan->chan, le32toh(scan->status)); 2352 2353 break; 2354 } 2355 #endif 2356 case WPI_STOP_SCAN: 2357 { 2358 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2359 BUS_DMASYNC_POSTREAD); 2360 2361 struct wpi_stop_scan *scan = 2362 (struct wpi_stop_scan *)(desc + 1); 2363 2364 DPRINTF(sc, WPI_DEBUG_SCAN, 2365 "scan finished nchan=%d status=%d chan=%d\n", 2366 scan->nchan, scan->status, scan->chan); 2367 2368 WPI_RXON_LOCK(sc); 2369 callout_stop(&sc->scan_timeout); 2370 WPI_RXON_UNLOCK(sc); 2371 if (scan->status == WPI_SCAN_ABORTED) 2372 ieee80211_cancel_scan(vap); 2373 else 2374 ieee80211_scan_next(vap); 2375 break; 2376 } 2377 } 2378 2379 if (sc->rxq.cur % 8 == 0) { 2380 /* Tell the firmware what we have processed. */ 2381 sc->sc_update_rx_ring(sc); 2382 } 2383 } 2384 } 2385 2386 /* 2387 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2388 * from power-down sleep mode. 2389 */ 2390 static void 2391 wpi_wakeup_intr(struct wpi_softc *sc) 2392 { 2393 int qid; 2394 2395 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2396 "%s: ucode wakeup from power-down sleep\n", __func__); 2397 2398 /* Wakeup RX and TX rings. 
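* Pick up ring index updates that were postponed (the update flags) while the microcontroller was asleep.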
*/ 2399 if (sc->rxq.update) { 2400 sc->rxq.update = 0; 2401 wpi_update_rx_ring(sc); 2402 } 2403 WPI_TXQ_LOCK(sc); 2404 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2405 struct wpi_tx_ring *ring = &sc->txq[qid]; 2406 2407 if (ring->update) { 2408 ring->update = 0; 2409 wpi_update_tx_ring(sc, ring); 2410 } 2411 } 2412 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2413 WPI_TXQ_UNLOCK(sc); 2414 } 2415 2416 /* 2417 * This function prints firmware registers 2418 */ 2419 #ifdef WPI_DEBUG 2420 static void 2421 wpi_debug_registers(struct wpi_softc *sc) 2422 { 2423 size_t i; 2424 static const uint32_t csr_tbl[] = { 2425 WPI_HW_IF_CONFIG, 2426 WPI_INT, 2427 WPI_INT_MASK, 2428 WPI_FH_INT, 2429 WPI_GPIO_IN, 2430 WPI_RESET, 2431 WPI_GP_CNTRL, 2432 WPI_EEPROM, 2433 WPI_EEPROM_GP, 2434 WPI_GIO, 2435 WPI_UCODE_GP1, 2436 WPI_UCODE_GP2, 2437 WPI_GIO_CHICKEN, 2438 WPI_ANA_PLL, 2439 WPI_DBG_HPET_MEM, 2440 }; 2441 static const uint32_t prph_tbl[] = { 2442 WPI_APMG_CLK_CTRL, 2443 WPI_APMG_PS, 2444 WPI_APMG_PCI_STT, 2445 WPI_APMG_RFKILL, 2446 }; 2447 2448 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2449 2450 for (i = 0; i < nitems(csr_tbl); i++) { 2451 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2452 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2453 2454 if ((i + 1) % 2 == 0) 2455 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2456 } 2457 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2458 2459 if (wpi_nic_lock(sc) == 0) { 2460 for (i = 0; i < nitems(prph_tbl); i++) { 2461 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2462 wpi_get_prph_string(prph_tbl[i]), 2463 wpi_prph_read(sc, prph_tbl[i])); 2464 2465 if ((i + 1) % 2 == 0) 2466 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2467 } 2468 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2469 wpi_nic_unlock(sc); 2470 } else { 2471 DPRINTF(sc, WPI_DEBUG_REGISTER, 2472 "Cannot access internal registers.\n"); 2473 } 2474 } 2475 #endif 2476 2477 /* 2478 * Dump the error log of the firmware when a firmware panic occurs. Although 2479 * we can't debug the firmware because it is neither open source nor free, it 2480 * can help us to identify certain classes of problems. 2481 */ 2482 static void 2483 wpi_fatal_intr(struct wpi_softc *sc) 2484 { 2485 struct wpi_fw_dump dump; 2486 uint32_t i, offset, count; 2487 2488 /* Check that the error log address is valid. */ 2489 if (sc->errptr < WPI_FW_DATA_BASE || 2490 sc->errptr + sizeof (dump) > 2491 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2492 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2493 sc->errptr); 2494 return; 2495 } 2496 if (wpi_nic_lock(sc) != 0) { 2497 printf("%s: could not read firmware error log\n", __func__); 2498 return; 2499 } 2500 /* Read number of entries in the log. */ 2501 count = wpi_mem_read(sc, sc->errptr); 2502 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2503 printf("%s: invalid count field (count = %u)\n", __func__, 2504 count); 2505 wpi_nic_unlock(sc); 2506 return; 2507 } 2508 /* Skip "count" field. */ 2509 offset = sc->errptr + sizeof (uint32_t); 2510 printf("firmware error log (count = %u):\n", count); 2511 for (i = 0; i < count; i++) { 2512 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2513 sizeof (dump) / sizeof (uint32_t)); 2514 2515 printf(" error type = \"%s\" (0x%08X)\n", 2516 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2517 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2518 dump.desc); 2519 printf(" error data = 0x%08X\n", 2520 dump.data); 2521 printf(" branch link = 0x%08X%08X\n", 2522 dump.blink[0], dump.blink[1]); 2523 printf(" interrupt link = 0x%08X%08X\n", 2524 dump.ilink[0], dump.ilink[1]); 2525 printf(" time = %u\n", dump.time); 2526 2527 offset += sizeof (dump); 2528 } 2529 wpi_nic_unlock(sc); 2530 /* Dump driver status (TX and RX rings) while we're here. */ 2531 printf("driver status:\n"); 2532 WPI_TXQ_LOCK(sc); 2533 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2534 struct wpi_tx_ring *ring = &sc->txq[i]; 2535 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2536 i, ring->qid, ring->cur, ring->queued); 2537 } 2538 WPI_TXQ_UNLOCK(sc); 2539 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2540 } 2541 2542 static void 2543 wpi_intr(void *arg) 2544 { 2545 struct wpi_softc *sc = arg; 2546 uint32_t r1, r2; 2547 2548 WPI_LOCK(sc); 2549 2550 /* Disable interrupts. */ 2551 WPI_WRITE(sc, WPI_INT_MASK, 0); 2552 2553 r1 = WPI_READ(sc, WPI_INT); 2554 2555 if (__predict_false(r1 == 0xffffffff || 2556 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2557 goto end; /* Hardware gone! */ 2558 2559 r2 = WPI_READ(sc, WPI_FH_INT); 2560 2561 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2562 r1, r2); 2563 2564 if (r1 == 0 && r2 == 0) 2565 goto done; /* Interrupt not for us. */ 2566 2567 /* Acknowledge interrupts. */ 2568 WPI_WRITE(sc, WPI_INT, r1); 2569 WPI_WRITE(sc, WPI_FH_INT, r2); 2570 2571 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2572 device_printf(sc->sc_dev, "fatal firmware error\n"); 2573 #ifdef WPI_DEBUG 2574 wpi_debug_registers(sc); 2575 #endif 2576 wpi_fatal_intr(sc); 2577 DPRINTF(sc, WPI_DEBUG_HW, 2578 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2579 "(Hardware Error)"); 2580 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2581 goto end; 2582 } 2583 2584 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2585 (r2 & WPI_FH_INT_RX)) 2586 wpi_notif_intr(sc); 2587 2588 if (r1 & WPI_INT_ALIVE) 2589 wakeup(sc); /* Firmware is alive. */ 2590 2591 if (r1 & WPI_INT_WAKEUP) 2592 wpi_wakeup_intr(sc); 2593 2594 done: 2595 /* Re-enable interrupts. 
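* (only if the device is still marked as running).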
*/ 2596 if (__predict_true(sc->sc_running)) 2597 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2598 2599 end: WPI_UNLOCK(sc); 2600 } 2601 2602 static void 2603 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) 2604 { 2605 struct wpi_tx_ring *ring; 2606 struct wpi_tx_data *data; 2607 uint8_t cur; 2608 2609 WPI_TXQ_LOCK(sc); 2610 ring = &sc->txq[ac]; 2611 2612 while (ring->pending != 0) { 2613 ring->pending--; 2614 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2615 data = &ring->data[cur]; 2616 2617 bus_dmamap_sync(ring->data_dmat, data->map, 2618 BUS_DMASYNC_POSTWRITE); 2619 bus_dmamap_unload(ring->data_dmat, data->map); 2620 m_freem(data->m); 2621 data->m = NULL; 2622 2623 ieee80211_node_decref(data->ni); 2624 data->ni = NULL; 2625 } 2626 2627 WPI_TXQ_UNLOCK(sc); 2628 } 2629 2630 static int 2631 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2632 { 2633 struct ieee80211_frame *wh; 2634 struct wpi_tx_cmd *cmd; 2635 struct wpi_tx_data *data; 2636 struct wpi_tx_desc *desc; 2637 struct wpi_tx_ring *ring; 2638 struct mbuf *m1; 2639 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2640 uint8_t cur, pad; 2641 uint16_t hdrlen; 2642 int error, i, nsegs, totlen, frag; 2643 2644 WPI_TXQ_LOCK(sc); 2645 2646 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2647 2648 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2649 2650 if (__predict_false(sc->sc_running == 0)) { 2651 /* wpi_stop() was called */ 2652 error = ENETDOWN; 2653 goto end; 2654 } 2655 2656 wh = mtod(buf->m, struct ieee80211_frame *); 2657 hdrlen = ieee80211_anyhdrsize(wh); 2658 totlen = buf->m->m_pkthdr.len; 2659 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); 2660 2661 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2662 error = EINVAL; 2663 goto end; 2664 } 2665 2666 if (hdrlen & 3) { 2667 /* First segment length must be a multiple of 4. */ 2668 pad = 4 - (hdrlen & 3); 2669 } else 2670 pad = 0; 2671 2672 ring = &sc->txq[buf->ac]; 2673 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2674 desc = &ring->desc[cur]; 2675 data = &ring->data[cur]; 2676 2677 /* Prepare TX firmware command. */ 2678 cmd = &ring->cmd[cur]; 2679 cmd->code = buf->code; 2680 cmd->flags = 0; 2681 cmd->qid = ring->qid; 2682 cmd->idx = cur; 2683 2684 memcpy(cmd->data, buf->data, buf->size); 2685 2686 /* Save and trim IEEE802.11 header. */ 2687 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2688 m_adj(buf->m, hdrlen); 2689 2690 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2691 segs, &nsegs, BUS_DMA_NOWAIT); 2692 if (error != 0 && error != EFBIG) { 2693 device_printf(sc->sc_dev, 2694 "%s: can't map mbuf (error %d)\n", __func__, error); 2695 goto end; 2696 } 2697 if (error != 0) { 2698 /* Too many DMA segments, linearize mbuf. */ 2699 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2700 if (m1 == NULL) { 2701 device_printf(sc->sc_dev, 2702 "%s: could not defrag mbuf\n", __func__); 2703 error = ENOBUFS; 2704 goto end; 2705 } 2706 buf->m = m1; 2707 2708 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2709 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2710 if (__predict_false(error != 0)) { 2711 /* XXX fix this (applicable to the iwn(4) too) */ 2712 /* 2713 * NB: Do not return error; 2714 * original mbuf does not exist anymore. 
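* (it was replaced by m_collapse() above, so the caller can no longer free it safely).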
2715 */ 2716 device_printf(sc->sc_dev, 2717 "%s: can't map mbuf (error %d)\n", __func__, 2718 error); 2719 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2720 if_inc_counter(buf->ni->ni_vap->iv_ifp, 2721 IFCOUNTER_OERRORS, 1); 2722 if (!frag) 2723 ieee80211_free_node(buf->ni); 2724 } 2725 m_freem(buf->m); 2726 error = 0; 2727 goto end; 2728 } 2729 } 2730 2731 KASSERT(nsegs < WPI_MAX_SCATTER, 2732 ("too many DMA segments, nsegs (%d) should be less than %d", 2733 nsegs, WPI_MAX_SCATTER)); 2734 2735 data->m = buf->m; 2736 data->ni = buf->ni; 2737 2738 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2739 __func__, ring->qid, cur, totlen, nsegs); 2740 2741 /* Fill TX descriptor. */ 2742 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2743 /* First DMA segment is used by the TX command. */ 2744 desc->segs[0].addr = htole32(data->cmd_paddr); 2745 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2746 /* Other DMA segments are for data payload. */ 2747 seg = &segs[0]; 2748 for (i = 1; i <= nsegs; i++) { 2749 desc->segs[i].addr = htole32(seg->ds_addr); 2750 desc->segs[i].len = htole32(seg->ds_len); 2751 seg++; 2752 } 2753 2754 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2755 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2756 BUS_DMASYNC_PREWRITE); 2757 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2758 BUS_DMASYNC_PREWRITE); 2759 2760 ring->pending += 1; 2761 2762 if (!frag) { 2763 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2764 WPI_TXQ_STATE_LOCK(sc); 2765 ring->queued += ring->pending; 2766 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, 2767 sc); 2768 WPI_TXQ_STATE_UNLOCK(sc); 2769 } 2770 2771 /* Kick TX ring. */ 2772 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2773 ring->pending = 0; 2774 sc->sc_update_tx_ring(sc, ring); 2775 } else 2776 ieee80211_node_incref(data->ni); 2777 2778 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 2779 __func__); 2780 2781 WPI_TXQ_UNLOCK(sc); 2782 2783 return (error); 2784 } 2785 2786 /* 2787 * Construct the data packet for a transmit buffer. 2788 */ 2789 static int 2790 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2791 { 2792 const struct ieee80211_txparam *tp; 2793 struct ieee80211vap *vap = ni->ni_vap; 2794 struct ieee80211com *ic = ni->ni_ic; 2795 struct wpi_node *wn = WPI_NODE(ni); 2796 struct ieee80211_channel *chan; 2797 struct ieee80211_frame *wh; 2798 struct ieee80211_key *k = NULL; 2799 struct wpi_buf tx_data; 2800 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2801 uint32_t flags; 2802 uint16_t ac, qos; 2803 uint8_t tid, type, rate; 2804 int swcrypt, ismcast, totlen; 2805 2806 wh = mtod(m, struct ieee80211_frame *); 2807 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2808 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2809 swcrypt = 1; 2810 2811 /* Select EDCA Access Category and TX ring for this frame. */ 2812 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2813 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2814 tid = qos & IEEE80211_QOS_TID; 2815 } else { 2816 qos = 0; 2817 tid = 0; 2818 } 2819 ac = M_WME_GETAC(m); 2820 2821 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2822 ni->ni_chan : ic->ic_curchan; 2823 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2824 2825 /* Choose a TX rate index. 
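* Fixed rates (management, multicast, a configured unicast rate, EAPOL) take precedence; otherwise ask the rate control module.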
*/ 2826 if (type == IEEE80211_FC0_TYPE_MGT) 2827 rate = tp->mgmtrate; 2828 else if (ismcast) 2829 rate = tp->mcastrate; 2830 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2831 rate = tp->ucastrate; 2832 else if (m->m_flags & M_EAPOL) 2833 rate = tp->mgmtrate; 2834 else { 2835 /* XXX pass pktlen */ 2836 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2837 rate = ni->ni_txrate; 2838 } 2839 2840 /* Encrypt the frame if need be. */ 2841 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2842 /* Retrieve key for TX. */ 2843 k = ieee80211_crypto_encap(ni, m); 2844 if (k == NULL) 2845 return (ENOBUFS); 2846 2847 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2848 2849 /* 802.11 header may have moved. */ 2850 wh = mtod(m, struct ieee80211_frame *); 2851 } 2852 totlen = m->m_pkthdr.len; 2853 2854 if (ieee80211_radiotap_active_vap(vap)) { 2855 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2856 2857 tap->wt_flags = 0; 2858 tap->wt_rate = rate; 2859 if (k != NULL) 2860 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2861 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2862 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2863 2864 ieee80211_radiotap_tx(vap, m); 2865 } 2866 2867 flags = 0; 2868 if (!ismcast) { 2869 /* Unicast frame, check if an ACK is expected. */ 2870 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2871 IEEE80211_QOS_ACKPOLICY_NOACK) 2872 flags |= WPI_TX_NEED_ACK; 2873 } 2874 2875 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2876 flags |= WPI_TX_AUTO_SEQ; 2877 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2878 flags |= WPI_TX_MORE_FRAG; 2879 2880 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2881 if (!ismcast) { 2882 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2883 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2884 flags |= WPI_TX_NEED_RTS; 2885 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2886 WPI_RATE_IS_OFDM(rate)) { 2887 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2888 flags |= WPI_TX_NEED_CTS; 2889 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2890 flags |= WPI_TX_NEED_RTS; 2891 } 2892 2893 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2894 flags |= WPI_TX_FULL_TXOP; 2895 } 2896 2897 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2898 if (type == IEEE80211_FC0_TYPE_MGT) { 2899 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2900 2901 /* Tell HW to set timestamp in probe responses. 
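* (Re)association requests also get a slightly longer timeout than other management frames.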
*/ 2902 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2903 flags |= WPI_TX_INSERT_TSTAMP; 2904 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2905 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2906 tx->timeout = htole16(3); 2907 else 2908 tx->timeout = htole16(2); 2909 } 2910 2911 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2912 tx->id = WPI_ID_BROADCAST; 2913 else { 2914 if (wn->id == WPI_ID_UNDEFINED) { 2915 device_printf(sc->sc_dev, 2916 "%s: undefined node id\n", __func__); 2917 return (EINVAL); 2918 } 2919 2920 tx->id = wn->id; 2921 } 2922 2923 if (!swcrypt) { 2924 switch (k->wk_cipher->ic_cipher) { 2925 case IEEE80211_CIPHER_AES_CCM: 2926 tx->security = WPI_CIPHER_CCMP; 2927 break; 2928 2929 default: 2930 break; 2931 } 2932 2933 memcpy(tx->key, k->wk_key, k->wk_keylen); 2934 } 2935 2936 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 2937 struct mbuf *next = m->m_nextpkt; 2938 2939 tx->lnext = htole16(next->m_pkthdr.len); 2940 tx->fnext = htole32(tx->security | 2941 (flags & WPI_TX_NEED_ACK) | 2942 WPI_NEXT_STA_ID(tx->id)); 2943 } 2944 2945 tx->len = htole16(totlen); 2946 tx->flags = htole32(flags); 2947 tx->plcp = rate2plcp(rate); 2948 tx->tid = tid; 2949 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2950 tx->ofdm_mask = 0xff; 2951 tx->cck_mask = 0x0f; 2952 tx->rts_ntries = 7; 2953 tx->data_ntries = tp->maxretry; 2954 2955 tx_data.ni = ni; 2956 tx_data.m = m; 2957 tx_data.size = sizeof(struct wpi_cmd_data); 2958 tx_data.code = WPI_CMD_TX_DATA; 2959 tx_data.ac = ac; 2960 2961 return wpi_cmd2(sc, &tx_data); 2962 } 2963 2964 static int 2965 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2966 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2967 { 2968 struct ieee80211vap *vap = ni->ni_vap; 2969 struct ieee80211_key *k = NULL; 2970 struct ieee80211_frame *wh; 2971 struct wpi_buf tx_data; 2972 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2973 uint32_t flags; 2974 uint8_t ac, type, rate; 2975 int swcrypt, totlen; 2976 2977 wh = mtod(m, struct ieee80211_frame *); 2978 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2979 swcrypt = 1; 2980 2981 ac = params->ibp_pri & 3; 2982 2983 /* Choose a TX rate index. */ 2984 rate = params->ibp_rate0; 2985 2986 flags = 0; 2987 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2988 flags |= WPI_TX_AUTO_SEQ; 2989 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2990 flags |= WPI_TX_NEED_ACK; 2991 if (params->ibp_flags & IEEE80211_BPF_RTS) 2992 flags |= WPI_TX_NEED_RTS; 2993 if (params->ibp_flags & IEEE80211_BPF_CTS) 2994 flags |= WPI_TX_NEED_CTS; 2995 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2996 flags |= WPI_TX_FULL_TXOP; 2997 2998 /* Encrypt the frame if need be. */ 2999 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 3000 /* Retrieve key for TX. */ 3001 k = ieee80211_crypto_encap(ni, m); 3002 if (k == NULL) 3003 return (ENOBUFS); 3004 3005 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 3006 3007 /* 802.11 header may have moved. 
*/ 3008 wh = mtod(m, struct ieee80211_frame *); 3009 } 3010 totlen = m->m_pkthdr.len; 3011 3012 if (ieee80211_radiotap_active_vap(vap)) { 3013 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 3014 3015 tap->wt_flags = 0; 3016 tap->wt_rate = rate; 3017 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 3018 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3019 3020 ieee80211_radiotap_tx(vap, m); 3021 } 3022 3023 memset(tx, 0, sizeof (struct wpi_cmd_data)); 3024 if (type == IEEE80211_FC0_TYPE_MGT) { 3025 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3026 3027 /* Tell HW to set timestamp in probe responses. */ 3028 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3029 flags |= WPI_TX_INSERT_TSTAMP; 3030 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3031 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3032 tx->timeout = htole16(3); 3033 else 3034 tx->timeout = htole16(2); 3035 } 3036 3037 if (!swcrypt) { 3038 switch (k->wk_cipher->ic_cipher) { 3039 case IEEE80211_CIPHER_AES_CCM: 3040 tx->security = WPI_CIPHER_CCMP; 3041 break; 3042 3043 default: 3044 break; 3045 } 3046 3047 memcpy(tx->key, k->wk_key, k->wk_keylen); 3048 } 3049 3050 tx->len = htole16(totlen); 3051 tx->flags = htole32(flags); 3052 tx->plcp = rate2plcp(rate); 3053 tx->id = WPI_ID_BROADCAST; 3054 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3055 tx->rts_ntries = params->ibp_try1; 3056 tx->data_ntries = params->ibp_try0; 3057 3058 tx_data.ni = ni; 3059 tx_data.m = m; 3060 tx_data.size = sizeof(struct wpi_cmd_data); 3061 tx_data.code = WPI_CMD_TX_DATA; 3062 tx_data.ac = ac; 3063 3064 return wpi_cmd2(sc, &tx_data); 3065 } 3066 3067 static __inline int 3068 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) 3069 { 3070 struct wpi_tx_ring *ring = &sc->txq[ac]; 3071 int retval; 3072 3073 WPI_TXQ_STATE_LOCK(sc); 3074 retval = WPI_TX_RING_HIMARK - ring->queued; 3075 WPI_TXQ_STATE_UNLOCK(sc); 3076 3077 return retval; 3078 } 3079 3080 static int 3081 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3082 const struct ieee80211_bpf_params *params) 3083 { 3084 struct ieee80211com *ic = ni->ni_ic; 3085 struct wpi_softc *sc = ic->ic_softc; 3086 uint16_t ac; 3087 int error = 0; 3088 3089 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3090 3091 ac = M_WME_GETAC(m); 3092 3093 WPI_TX_LOCK(sc); 3094 3095 /* NB: no fragments here */ 3096 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { 3097 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3098 goto unlock; 3099 } 3100 3101 if (params == NULL) { 3102 /* 3103 * Legacy path; interpret frame contents to decide 3104 * precisely how to send the frame. 3105 */ 3106 error = wpi_tx_data(sc, m, ni); 3107 } else { 3108 /* 3109 * Caller supplied explicit parameters to use in 3110 * sending the frame. 3111 */ 3112 error = wpi_tx_data_raw(sc, m, ni, params); 3113 } 3114 3115 unlock: WPI_TX_UNLOCK(sc); 3116 3117 if (error != 0) { 3118 m_freem(m); 3119 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3120 3121 return error; 3122 } 3123 3124 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3125 3126 return 0; 3127 } 3128 3129 static int 3130 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3131 { 3132 struct wpi_softc *sc = ic->ic_softc; 3133 struct ieee80211_node *ni; 3134 struct mbuf *mnext; 3135 uint16_t ac; 3136 int error, nmbufs; 3137 3138 WPI_TX_LOCK(sc); 3139 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3140 3141 /* Check if interface is up & running. 
*/ 3142 if (__predict_false(sc->sc_running == 0)) { 3143 error = ENXIO; 3144 goto unlock; 3145 } 3146 3147 nmbufs = 1; 3148 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) 3149 nmbufs++; 3150 3151 /* Check for available space. */ 3152 ac = M_WME_GETAC(m); 3153 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { 3154 error = ENOBUFS; 3155 goto unlock; 3156 } 3157 3158 error = 0; 3159 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3160 do { 3161 mnext = m->m_nextpkt; 3162 if (wpi_tx_data(sc, m, ni) != 0) { 3163 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 3164 nmbufs); 3165 wpi_free_txfrags(sc, ac); 3166 ieee80211_free_mbuf(m); 3167 ieee80211_free_node(ni); 3168 break; 3169 } 3170 } while((m = mnext) != NULL); 3171 3172 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3173 3174 unlock: WPI_TX_UNLOCK(sc); 3175 3176 return (error); 3177 } 3178 3179 static void 3180 wpi_watchdog_rfkill(void *arg) 3181 { 3182 struct wpi_softc *sc = arg; 3183 struct ieee80211com *ic = &sc->sc_ic; 3184 3185 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3186 3187 /* No need to lock firmware memory. */ 3188 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3189 /* Radio kill switch is still off. */ 3190 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3191 sc); 3192 } else 3193 ieee80211_runtask(ic, &sc->sc_radioon_task); 3194 } 3195 3196 static void 3197 wpi_scan_timeout(void *arg) 3198 { 3199 struct wpi_softc *sc = arg; 3200 struct ieee80211com *ic = &sc->sc_ic; 3201 3202 ic_printf(ic, "scan timeout\n"); 3203 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3204 } 3205 3206 static void 3207 wpi_tx_timeout(void *arg) 3208 { 3209 struct wpi_softc *sc = arg; 3210 struct ieee80211com *ic = &sc->sc_ic; 3211 3212 ic_printf(ic, "device timeout\n"); 3213 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3214 } 3215 3216 static void 3217 wpi_parent(struct ieee80211com *ic) 3218 { 3219 struct wpi_softc *sc = ic->ic_softc; 3220 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3221 3222 if (ic->ic_nrunning > 0) { 3223 if (wpi_init(sc) == 0) { 3224 ieee80211_notify_radio(ic, 1); 3225 ieee80211_start_all(ic); 3226 } else { 3227 ieee80211_notify_radio(ic, 0); 3228 ieee80211_stop(vap); 3229 } 3230 } else 3231 wpi_stop(sc); 3232 } 3233 3234 /* 3235 * Send a command to the firmware. 3236 */ 3237 static int 3238 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, 3239 int async) 3240 { 3241 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3242 struct wpi_tx_desc *desc; 3243 struct wpi_tx_data *data; 3244 struct wpi_tx_cmd *cmd; 3245 struct mbuf *m; 3246 bus_addr_t paddr; 3247 uint16_t totlen; 3248 int error; 3249 3250 WPI_TXQ_LOCK(sc); 3251 3252 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3253 3254 if (__predict_false(sc->sc_running == 0)) { 3255 /* wpi_stop() was called */ 3256 if (code == WPI_CMD_SCAN) 3257 error = ENETDOWN; 3258 else 3259 error = 0; 3260 3261 goto fail; 3262 } 3263 3264 if (async == 0) 3265 WPI_LOCK_ASSERT(sc); 3266 3267 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", 3268 __func__, wpi_cmd_str(code), size, async); 3269 3270 desc = &ring->desc[ring->cur]; 3271 data = &ring->data[ring->cur]; 3272 totlen = 4 + size; 3273 3274 if (size > sizeof cmd->data) { 3275 /* Command is too large to fit in a descriptor. 
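* Build it in a separately allocated jumbo cluster and DMA-map that instead.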
*/ 3276 if (totlen > MCLBYTES) { 3277 error = EINVAL; 3278 goto fail; 3279 } 3280 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3281 if (m == NULL) { 3282 error = ENOMEM; 3283 goto fail; 3284 } 3285 cmd = mtod(m, struct wpi_tx_cmd *); 3286 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3287 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3288 if (error != 0) { 3289 m_freem(m); 3290 goto fail; 3291 } 3292 data->m = m; 3293 } else { 3294 cmd = &ring->cmd[ring->cur]; 3295 paddr = data->cmd_paddr; 3296 } 3297 3298 cmd->code = code; 3299 cmd->flags = 0; 3300 cmd->qid = ring->qid; 3301 cmd->idx = ring->cur; 3302 memcpy(cmd->data, buf, size); 3303 3304 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3305 desc->segs[0].addr = htole32(paddr); 3306 desc->segs[0].len = htole32(totlen); 3307 3308 if (size > sizeof cmd->data) { 3309 bus_dmamap_sync(ring->data_dmat, data->map, 3310 BUS_DMASYNC_PREWRITE); 3311 } else { 3312 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3313 BUS_DMASYNC_PREWRITE); 3314 } 3315 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3316 BUS_DMASYNC_PREWRITE); 3317 3318 /* Kick command ring. */ 3319 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3320 sc->sc_update_tx_ring(sc, ring); 3321 3322 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3323 3324 WPI_TXQ_UNLOCK(sc); 3325 3326 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3327 3328 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3329 3330 WPI_TXQ_UNLOCK(sc); 3331 3332 return error; 3333 } 3334 3335 /* 3336 * Configure HW multi-rate retries. 3337 */ 3338 static int 3339 wpi_mrr_setup(struct wpi_softc *sc) 3340 { 3341 struct ieee80211com *ic = &sc->sc_ic; 3342 struct wpi_mrr_setup mrr; 3343 uint8_t i; 3344 int error; 3345 3346 /* CCK rates (not used with 802.11a). */ 3347 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3348 mrr.rates[i].flags = 0; 3349 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3350 /* Fallback to the immediate lower CCK rate (if any.) */ 3351 mrr.rates[i].next = 3352 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3353 /* Try twice at this rate before falling back to "next". */ 3354 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3355 } 3356 /* OFDM rates (not used with 802.11b). */ 3357 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3358 mrr.rates[i].flags = 0; 3359 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3360 /* Fallback to the immediate lower rate (if any.) */ 3361 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3362 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3363 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3364 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3365 i - 1; 3366 /* Try twice at this rate before falling back to "next". */ 3367 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3368 } 3369 /* Setup MRR for control frames. */ 3370 mrr.which = htole32(WPI_MRR_CTL); 3371 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3372 if (error != 0) { 3373 device_printf(sc->sc_dev, 3374 "could not setup MRR for control frames\n"); 3375 return error; 3376 } 3377 /* Setup MRR for data frames. 
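* The fallback table built above is reused; only the target selector (mrr.which) changes.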
*/ 3378 mrr.which = htole32(WPI_MRR_DATA); 3379 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3380 if (error != 0) { 3381 device_printf(sc->sc_dev, 3382 "could not setup MRR for data frames\n"); 3383 return error; 3384 } 3385 return 0; 3386 } 3387 3388 static int 3389 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3390 { 3391 struct ieee80211com *ic = ni->ni_ic; 3392 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3393 struct wpi_node *wn = WPI_NODE(ni); 3394 struct wpi_node_info node; 3395 int error; 3396 3397 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3398 3399 if (wn->id == WPI_ID_UNDEFINED) 3400 return EINVAL; 3401 3402 memset(&node, 0, sizeof node); 3403 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3404 node.id = wn->id; 3405 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3406 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3407 node.action = htole32(WPI_ACTION_SET_RATE); 3408 node.antenna = WPI_ANTENNA_BOTH; 3409 3410 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3411 wn->id, ether_sprintf(ni->ni_macaddr)); 3412 3413 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3414 if (error != 0) { 3415 device_printf(sc->sc_dev, 3416 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3417 error); 3418 return error; 3419 } 3420 3421 if (wvp->wv_gtk != 0) { 3422 error = wpi_set_global_keys(ni); 3423 if (error != 0) { 3424 device_printf(sc->sc_dev, 3425 "%s: error while setting global keys\n", __func__); 3426 return ENXIO; 3427 } 3428 } 3429 3430 return 0; 3431 } 3432 3433 /* 3434 * Broadcast node is used to send group-addressed and management frames. 3435 */ 3436 static int 3437 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3438 { 3439 struct ieee80211com *ic = &sc->sc_ic; 3440 struct wpi_node_info node; 3441 3442 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3443 3444 memset(&node, 0, sizeof node); 3445 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3446 node.id = WPI_ID_BROADCAST; 3447 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3448 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3449 node.action = htole32(WPI_ACTION_SET_RATE); 3450 node.antenna = WPI_ANTENNA_BOTH; 3451 3452 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3453 3454 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3455 } 3456 3457 static int 3458 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3459 { 3460 struct wpi_node *wn = WPI_NODE(ni); 3461 int error; 3462 3463 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3464 3465 wn->id = wpi_add_node_entry_sta(sc); 3466 3467 if ((error = wpi_add_node(sc, ni)) != 0) { 3468 wpi_del_node_entry(sc, wn->id); 3469 wn->id = WPI_ID_UNDEFINED; 3470 return error; 3471 } 3472 3473 return 0; 3474 } 3475 3476 static int 3477 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3478 { 3479 struct wpi_node *wn = WPI_NODE(ni); 3480 int error; 3481 3482 KASSERT(wn->id == WPI_ID_UNDEFINED, 3483 ("the node %d was added before", wn->id)); 3484 3485 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3486 3487 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3488 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3489 return ENOMEM; 3490 } 3491 3492 if ((error = wpi_add_node(sc, ni)) != 0) { 3493 wpi_del_node_entry(sc, wn->id); 3494 wn->id = WPI_ID_UNDEFINED; 3495 return error; 3496 } 3497 3498 return 0; 3499 } 3500 3501 static void 3502 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3503 { 3504 struct wpi_node *wn = WPI_NODE(ni); 3505 struct wpi_cmd_del_node node; 3506 int error; 3507 3508 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3509 3510 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3511 3512 memset(&node, 0, sizeof node); 3513 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3514 node.count = 1; 3515 3516 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3517 wn->id, ether_sprintf(ni->ni_macaddr)); 3518 3519 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3520 if (error != 0) { 3521 device_printf(sc->sc_dev, 3522 "%s: could not delete node %u, error %d\n", __func__, 3523 wn->id, error); 3524 } 3525 } 3526 3527 static int 3528 wpi_updateedca(struct ieee80211com *ic) 3529 { 3530 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3531 struct wpi_softc *sc = ic->ic_softc; 3532 struct wpi_edca_params cmd; 3533 int aci, error; 3534 3535 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3536 3537 memset(&cmd, 0, sizeof cmd); 3538 cmd.flags = htole32(WPI_EDCA_UPDATE); 3539 for (aci = 0; aci < WME_NUM_AC; aci++) { 3540 const struct wmeParams *ac = 3541 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3542 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3543 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3544 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3545 cmd.ac[aci].txoplimit = 3546 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3547 3548 DPRINTF(sc, WPI_DEBUG_EDCA, 3549 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3550 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3551 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3552 cmd.ac[aci].txoplimit); 3553 } 3554 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3555 3556 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3557 3558 return error; 3559 #undef WPI_EXP2 3560 } 3561 3562 static void 3563 wpi_set_promisc(struct wpi_softc *sc) 3564 { 3565 struct ieee80211com *ic = &sc->sc_ic; 3566 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3567 uint32_t promisc_filter; 3568 3569 promisc_filter = WPI_FILTER_CTL; 3570 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3571 promisc_filter |= WPI_FILTER_PROMISC; 3572 3573 if (ic->ic_promisc > 0) 3574 sc->rxon.filter |= htole32(promisc_filter); 3575 else 3576 sc->rxon.filter &= ~htole32(promisc_filter); 3577 } 3578 3579 static void 3580 wpi_update_promisc(struct ieee80211com *ic) 3581 { 3582 struct wpi_softc *sc = ic->ic_softc; 3583 3584 WPI_LOCK(sc); 3585 if (sc->sc_running == 0) { 3586 WPI_UNLOCK(sc); 3587 return; 3588 } 3589 WPI_UNLOCK(sc); 3590 3591 WPI_RXON_LOCK(sc); 3592 wpi_set_promisc(sc); 3593 3594 if (wpi_send_rxon(sc, 1, 1) != 0) { 3595 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3596 __func__); 3597 } 3598 WPI_RXON_UNLOCK(sc); 3599 } 3600 3601 static void 3602 wpi_update_mcast(struct ieee80211com *ic) 3603 { 3604 /* Ignore */ 3605 } 3606 3607 static void 3608 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3609 { 3610 struct wpi_cmd_led led; 3611 3612 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3613 3614 led.which = which; 3615 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3616 led.off = off; 3617 led.on = on; 3618 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3619 } 3620 3621 static int 3622 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3623 { 3624 struct wpi_cmd_timing cmd; 3625 uint64_t val, mod; 3626 3627 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3628 3629 memset(&cmd, 0, sizeof cmd); 3630 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3631 cmd.bintval = htole16(ni->ni_intval); 3632 cmd.lintval = htole16(10); 3633 3634 /* Compute remaining time until next beacon. */ 3635 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3636 mod = le64toh(cmd.tstamp) % val; 3637 cmd.binitval = htole32((uint32_t)(val - mod)); 3638 3639 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3640 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3641 3642 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3643 } 3644 3645 /* 3646 * This function is called periodically (every 60 seconds) to adjust output 3647 * power to temperature changes. 3648 */ 3649 static void 3650 wpi_power_calibration(struct wpi_softc *sc) 3651 { 3652 int temp; 3653 3654 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3655 3656 /* Update sensor data. */ 3657 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3658 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3659 3660 /* Sanity-check read value. */ 3661 if (temp < -260 || temp > 25) { 3662 /* This can't be correct, ignore. */ 3663 DPRINTF(sc, WPI_DEBUG_TEMP, 3664 "out-of-range temperature reported: %d\n", temp); 3665 return; 3666 } 3667 3668 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3669 3670 /* Adjust Tx power if need be. */ 3671 if (abs(temp - sc->temp) <= 6) 3672 return; 3673 3674 sc->temp = temp; 3675 3676 if (wpi_set_txpower(sc, 1) != 0) { 3677 /* just warn, too bad for the automatic calibration... */ 3678 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3679 } 3680 } 3681 3682 /* 3683 * Set TX power for current channel. 3684 */ 3685 static int 3686 wpi_set_txpower(struct wpi_softc *sc, int async) 3687 { 3688 struct wpi_power_group *group; 3689 struct wpi_cmd_txpower cmd; 3690 uint8_t chan; 3691 int idx, is_chan_5ghz, i; 3692 3693 /* Retrieve current channel from last RXON. 
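* TX power is programmed for whatever channel the last RXON configured.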
*/ 3694 chan = sc->rxon.chan; 3695 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3696 3697 /* Find the TX power group to which this channel belongs. */ 3698 if (is_chan_5ghz) { 3699 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3700 if (chan <= group->chan) 3701 break; 3702 } else 3703 group = &sc->groups[0]; 3704 3705 memset(&cmd, 0, sizeof cmd); 3706 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3707 cmd.chan = htole16(chan); 3708 3709 /* Set TX power for all OFDM and CCK rates. */ 3710 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3711 /* Retrieve TX power for this channel/rate. */ 3712 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3713 3714 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3715 3716 if (is_chan_5ghz) { 3717 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3718 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3719 } else { 3720 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3721 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3722 } 3723 DPRINTF(sc, WPI_DEBUG_TEMP, 3724 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3725 } 3726 3727 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3728 } 3729 3730 /* 3731 * Determine Tx power index for a given channel/rate combination. 3732 * This takes into account the regulatory information from EEPROM and the 3733 * current temperature. 3734 */ 3735 static int 3736 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3737 uint8_t chan, int is_chan_5ghz, int ridx) 3738 { 3739 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3740 #define fdivround(a, b, n) \ 3741 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3742 3743 /* Linear interpolation. */ 3744 #define interpolate(x, x1, y1, x2, y2, n) \ 3745 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3746 3747 struct wpi_power_sample *sample; 3748 int pwr, idx; 3749 3750 /* Default TX power is group maximum TX power minus 3dB. */ 3751 pwr = group->maxpwr / 2; 3752 3753 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3754 switch (ridx) { 3755 case WPI_RIDX_OFDM36: 3756 pwr -= is_chan_5ghz ? 5 : 0; 3757 break; 3758 case WPI_RIDX_OFDM48: 3759 pwr -= is_chan_5ghz ? 10 : 7; 3760 break; 3761 case WPI_RIDX_OFDM54: 3762 pwr -= is_chan_5ghz ? 12 : 9; 3763 break; 3764 } 3765 3766 /* Never exceed the channel maximum allowed TX power. */ 3767 pwr = min(pwr, sc->maxpwr[chan]); 3768 3769 /* Retrieve TX power index into gain tables from samples. */ 3770 for (sample = group->samples; sample < &group->samples[3]; sample++) 3771 if (pwr > sample[1].power) 3772 break; 3773 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3774 idx = interpolate(pwr, sample[0].power, sample[0].index, 3775 sample[1].power, sample[1].index, 19); 3776 3777 /*- 3778 * Adjust power index based on current temperature: 3779 * - if cooler than factory-calibrated: decrease output power 3780 * - if warmer than factory-calibrated: increase output power 3781 */ 3782 idx -= (sc->temp - group->temp) * 11 / 100; 3783 3784 /* Decrease TX power for CCK rates (-5dB). */ 3785 if (ridx >= WPI_RIDX_CCK1) 3786 idx += 10; 3787 3788 /* Make sure idx stays in a valid range. */ 3789 if (idx < 0) 3790 return 0; 3791 if (idx > WPI_MAX_PWR_INDEX) 3792 return WPI_MAX_PWR_INDEX; 3793 return idx; 3794 3795 #undef interpolate 3796 #undef fdivround 3797 } 3798 3799 /* 3800 * Set STA mode power saving level (between 0 and 5). 
3801 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3802 */ 3803 static int 3804 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3805 { 3806 struct wpi_pmgt_cmd cmd; 3807 const struct wpi_pmgt *pmgt; 3808 uint32_t max, reg; 3809 uint8_t skip_dtim; 3810 int i; 3811 3812 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3813 "%s: dtim=%d, level=%d, async=%d\n", 3814 __func__, dtim, level, async); 3815 3816 /* Select which PS parameters to use. */ 3817 if (dtim <= 10) 3818 pmgt = &wpi_pmgt[0][level]; 3819 else 3820 pmgt = &wpi_pmgt[1][level]; 3821 3822 memset(&cmd, 0, sizeof cmd); 3823 if (level != 0) /* not CAM */ 3824 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3825 /* Retrieve PCIe Active State Power Management (ASPM). */ 3826 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 3827 if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */ 3828 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3829 3830 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3831 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3832 3833 if (dtim == 0) { 3834 dtim = 1; 3835 skip_dtim = 0; 3836 } else 3837 skip_dtim = pmgt->skip_dtim; 3838 3839 if (skip_dtim != 0) { 3840 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3841 max = pmgt->intval[4]; 3842 if (max == (uint32_t)-1) 3843 max = dtim * (skip_dtim + 1); 3844 else if (max > dtim) 3845 max = (max / dtim) * dtim; 3846 } else 3847 max = dtim; 3848 3849 for (i = 0; i < 5; i++) 3850 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3851 3852 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3853 } 3854 3855 static int 3856 wpi_send_btcoex(struct wpi_softc *sc) 3857 { 3858 struct wpi_bluetooth cmd; 3859 3860 memset(&cmd, 0, sizeof cmd); 3861 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3862 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3863 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3864 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3865 __func__); 3866 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3867 } 3868 3869 static int 3870 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3871 { 3872 int error; 3873 3874 if (async) 3875 WPI_RXON_LOCK_ASSERT(sc); 3876 3877 if (assoc && wpi_check_bss_filter(sc) != 0) { 3878 struct wpi_assoc rxon_assoc; 3879 3880 rxon_assoc.flags = sc->rxon.flags; 3881 rxon_assoc.filter = sc->rxon.filter; 3882 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3883 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3884 rxon_assoc.reserved = 0; 3885 3886 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3887 sizeof (struct wpi_assoc), async); 3888 if (error != 0) { 3889 device_printf(sc->sc_dev, 3890 "RXON_ASSOC command failed, error %d\n", error); 3891 return error; 3892 } 3893 } else { 3894 if (async) { 3895 WPI_NT_LOCK(sc); 3896 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3897 sizeof (struct wpi_rxon), async); 3898 if (error == 0) 3899 wpi_clear_node_table(sc); 3900 WPI_NT_UNLOCK(sc); 3901 } else { 3902 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3903 sizeof (struct wpi_rxon), async); 3904 if (error == 0) 3905 wpi_clear_node_table(sc); 3906 } 3907 3908 if (error != 0) { 3909 device_printf(sc->sc_dev, 3910 "RXON command failed, error %d\n", error); 3911 return error; 3912 } 3913 3914 /* Add broadcast node. 
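* Sending a full RXON clears the node table (see wpi_clear_node_table() above), so the broadcast entry has to be re-added.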
*/ 3915 error = wpi_add_broadcast_node(sc, async); 3916 if (error != 0) { 3917 device_printf(sc->sc_dev, 3918 "could not add broadcast node, error %d\n", error); 3919 return error; 3920 } 3921 } 3922 3923 /* Configuration has changed, set Tx power accordingly. */ 3924 if ((error = wpi_set_txpower(sc, async)) != 0) { 3925 device_printf(sc->sc_dev, 3926 "%s: could not set TX power, error %d\n", __func__, error); 3927 return error; 3928 } 3929 3930 return 0; 3931 } 3932 3933 /** 3934 * Configure the card to listen on a particular channel; this transitions 3935 * the card into a state where it can receive frames from remote devices. 3936 */ 3937 static int 3938 wpi_config(struct wpi_softc *sc) 3939 { 3940 struct ieee80211com *ic = &sc->sc_ic; 3941 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3942 struct ieee80211_channel *c = ic->ic_curchan; 3943 int error; 3944 3945 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3946 3947 /* Set power saving level to CAM during initialization. */ 3948 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3949 device_printf(sc->sc_dev, 3950 "%s: could not set power saving level\n", __func__); 3951 return error; 3952 } 3953 3954 /* Configure bluetooth coexistence. */ 3955 if ((error = wpi_send_btcoex(sc)) != 0) { 3956 device_printf(sc->sc_dev, 3957 "could not configure bluetooth coexistence\n"); 3958 return error; 3959 } 3960 3961 /* Configure adapter. */ 3962 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3963 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3964 3965 /* Set default channel. */ 3966 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3967 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3968 if (IEEE80211_IS_CHAN_2GHZ(c)) 3969 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3970 3971 sc->rxon.filter = WPI_FILTER_MULTICAST; 3972 switch (ic->ic_opmode) { 3973 case IEEE80211_M_STA: 3974 sc->rxon.mode = WPI_MODE_STA; 3975 break; 3976 case IEEE80211_M_IBSS: 3977 sc->rxon.mode = WPI_MODE_IBSS; 3978 sc->rxon.filter |= WPI_FILTER_BEACON; 3979 break; 3980 case IEEE80211_M_HOSTAP: 3981 /* XXX workaround for beaconing */ 3982 sc->rxon.mode = WPI_MODE_IBSS; 3983 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3984 break; 3985 case IEEE80211_M_AHDEMO: 3986 sc->rxon.mode = WPI_MODE_HOSTAP; 3987 break; 3988 case IEEE80211_M_MONITOR: 3989 sc->rxon.mode = WPI_MODE_MONITOR; 3990 break; 3991 default: 3992 device_printf(sc->sc_dev, "unknown opmode %d\n", 3993 ic->ic_opmode); 3994 return EINVAL; 3995 } 3996 sc->rxon.filter = htole32(sc->rxon.filter); 3997 wpi_set_promisc(sc); 3998 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3999 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4000 4001 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 4002 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4003 __func__); 4004 return error; 4005 } 4006 4007 /* Set up rate scaling (MRR). */ 4008 if ((error = wpi_mrr_setup(sc)) != 0) { 4009 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 4010 error); 4011 return error; 4012 } 4013 4014 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4015 4016 return 0; 4017 } 4018 4019 static uint16_t 4020 wpi_get_active_dwell_time(struct wpi_softc *sc, 4021 struct ieee80211_channel *c, uint8_t n_probes) 4022 { 4023 /* No channel? Default to 2GHz settings. */ 4024 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 4025 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 4026 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 4027 } 4028 4029 /* 5GHz dwell time.
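* As in the 2GHz case, the dwell grows with the number of probe requests to be sent on the channel.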
*/ 4030 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4031 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4032 } 4033 4034 /* 4035 * Limit the total dwell time. 4036 * 4037 * Returns the dwell time in milliseconds. 4038 */ 4039 static uint16_t 4040 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4041 { 4042 struct ieee80211com *ic = &sc->sc_ic; 4043 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4044 uint16_t bintval = 0; 4045 4046 /* bintval is in TU (1.024mS) */ 4047 if (vap != NULL) 4048 bintval = vap->iv_bss->ni_intval; 4049 4050 /* 4051 * If it's non-zero, we should calculate the minimum of 4052 * it and the DWELL_BASE. 4053 * 4054 * XXX Yes, the math should take into account that bintval 4055 * is 1.024mS, not 1mS.. 4056 */ 4057 if (bintval > 0) { 4058 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4059 bintval); 4060 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4061 } 4062 4063 /* No association context? Default. */ 4064 return dwell_time; 4065 } 4066 4067 static uint16_t 4068 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4069 { 4070 uint16_t passive; 4071 4072 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4073 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4074 else 4075 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4076 4077 /* Clamp to the beacon interval if we're associated. */ 4078 return (wpi_limit_dwell(sc, passive)); 4079 } 4080 4081 static uint32_t 4082 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4083 { 4084 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4085 uint32_t nbeacons = time / bintval; 4086 4087 if (mod > WPI_PAUSE_MAX_TIME) 4088 mod = WPI_PAUSE_MAX_TIME; 4089 4090 return WPI_PAUSE_SCAN(nbeacons, mod); 4091 } 4092 4093 /* 4094 * Send a scan request to the firmware. 4095 */ 4096 static int 4097 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4098 { 4099 struct ieee80211com *ic = &sc->sc_ic; 4100 struct ieee80211_scan_state *ss = ic->ic_scan; 4101 struct ieee80211vap *vap = ss->ss_vap; 4102 struct wpi_scan_hdr *hdr; 4103 struct wpi_cmd_data *tx; 4104 struct wpi_scan_essid *essids; 4105 struct wpi_scan_chan *chan; 4106 struct ieee80211_frame *wh; 4107 struct ieee80211_rateset *rs; 4108 uint16_t bintval, buflen, dwell_active, dwell_passive; 4109 uint8_t *buf, *frm, i, nssid; 4110 int bgscan, error; 4111 4112 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4113 4114 /* 4115 * We are absolutely not allowed to send a scan command when another 4116 * scan command is pending. 4117 */ 4118 if (callout_pending(&sc->scan_timeout)) { 4119 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4120 __func__); 4121 error = EAGAIN; 4122 goto fail; 4123 } 4124 4125 bgscan = wpi_check_bss_filter(sc); 4126 bintval = vap->iv_bss->ni_intval; 4127 if (bgscan != 0 && 4128 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4129 error = EOPNOTSUPP; 4130 goto fail; 4131 } 4132 4133 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4134 if (buf == NULL) { 4135 device_printf(sc->sc_dev, 4136 "%s: could not allocate buffer for scan command\n", 4137 __func__); 4138 error = ENOMEM; 4139 goto fail; 4140 } 4141 hdr = (struct wpi_scan_hdr *)buf; 4142 4143 /* 4144 * Move to the next channel if no packets are received within 10 msecs 4145 * after sending the probe request. 
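* (quiet_time below holds that limit in ms; quiet_threshold is the frame count below which it applies.)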
4146 */ 4147 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4148 hdr->quiet_threshold = htole16(1); 4149 4150 if (bgscan != 0) { 4151 /* 4152 * Max needs to be greater than active and passive and quiet! 4153 * It's also in microseconds! 4154 */ 4155 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4156 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4157 bintval)); 4158 } 4159 4160 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4161 4162 tx = (struct wpi_cmd_data *)(hdr + 1); 4163 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4164 tx->id = WPI_ID_BROADCAST; 4165 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4166 4167 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4168 /* Send probe requests at 6Mbps. */ 4169 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4170 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4171 } else { 4172 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4173 /* Send probe requests at 1Mbps. */ 4174 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4175 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4176 } 4177 4178 essids = (struct wpi_scan_essid *)(tx + 1); 4179 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4180 for (i = 0; i < nssid; i++) { 4181 essids[i].id = IEEE80211_ELEMID_SSID; 4182 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4183 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4184 #ifdef WPI_DEBUG 4185 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4186 printf("Scanning Essid: "); 4187 ieee80211_print_essid(essids[i].data, essids[i].len); 4188 printf("\n"); 4189 } 4190 #endif 4191 } 4192 4193 /* 4194 * Build a probe request frame. Most of the following code is a 4195 * copy & paste of what is done in net80211. 4196 */ 4197 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4198 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4199 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4200 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4201 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4202 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4203 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4204 4205 frm = (uint8_t *)(wh + 1); 4206 frm = ieee80211_add_ssid(frm, NULL, 0); 4207 frm = ieee80211_add_rates(frm, rs); 4208 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4209 frm = ieee80211_add_xrates(frm, rs); 4210 4211 /* Set length of probe request. */ 4212 tx->len = htole16(frm - (uint8_t *)wh); 4213 4214 /* 4215 * Construct information about the channel that we 4216 * want to scan. The firmware expects this to be directly 4217 * after the scan probe request 4218 */ 4219 chan = (struct wpi_scan_chan *)frm; 4220 chan->chan = ieee80211_chan2ieee(ic, c); 4221 chan->flags = 0; 4222 if (nssid) { 4223 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4224 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4225 } else 4226 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4227 4228 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4229 chan->flags |= WPI_CHAN_ACTIVE; 4230 4231 /* 4232 * Calculate the active/passive dwell times. 4233 */ 4234 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4235 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4236 4237 /* Make sure they're valid. 
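* The active dwell must never exceed the passive dwell, so clamp it here.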
*/ 4238 if (dwell_active > dwell_passive) 4239 dwell_active = dwell_passive; 4240 4241 chan->active = htole16(dwell_active); 4242 chan->passive = htole16(dwell_passive); 4243 4244 chan->dsp_gain = 0x6e; /* Default level */ 4245 4246 if (IEEE80211_IS_CHAN_5GHZ(c)) 4247 chan->rf_gain = 0x3b; 4248 else 4249 chan->rf_gain = 0x28; 4250 4251 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4252 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4253 4254 hdr->nchan++; 4255 4256 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4257 /* XXX Force probe request transmission. */ 4258 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4259 4260 chan++; 4261 4262 /* Reduce unnecessary delay. */ 4263 chan->flags = 0; 4264 chan->passive = chan->active = hdr->quiet_time; 4265 4266 hdr->nchan++; 4267 } 4268 4269 chan++; 4270 4271 buflen = (uint8_t *)chan - buf; 4272 hdr->len = htole16(buflen); 4273 4274 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4275 hdr->nchan); 4276 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4277 free(buf, M_DEVBUF); 4278 4279 if (error != 0) 4280 goto fail; 4281 4282 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4283 4284 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4285 4286 return 0; 4287 4288 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4289 4290 return error; 4291 } 4292 4293 static int 4294 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4295 { 4296 struct ieee80211com *ic = vap->iv_ic; 4297 struct ieee80211_node *ni = vap->iv_bss; 4298 struct ieee80211_channel *c = ni->ni_chan; 4299 int error; 4300 4301 WPI_RXON_LOCK(sc); 4302 4303 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4304 4305 /* Update adapter configuration. */ 4306 sc->rxon.associd = 0; 4307 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4308 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4309 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4310 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4311 if (IEEE80211_IS_CHAN_2GHZ(c)) 4312 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4313 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4314 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4315 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4316 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4317 if (IEEE80211_IS_CHAN_A(c)) { 4318 sc->rxon.cck_mask = 0; 4319 sc->rxon.ofdm_mask = 0x15; 4320 } else if (IEEE80211_IS_CHAN_B(c)) { 4321 sc->rxon.cck_mask = 0x03; 4322 sc->rxon.ofdm_mask = 0; 4323 } else { 4324 /* Assume 802.11b/g. 
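* (i.e. 0x0f = all four CCK rates, 0x15 = the basic 6/12/24 Mbps OFDM rates)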
*/ 4325 sc->rxon.cck_mask = 0x0f; 4326 sc->rxon.ofdm_mask = 0x15; 4327 } 4328 4329 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4330 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4331 sc->rxon.ofdm_mask); 4332 4333 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4334 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4335 __func__); 4336 } 4337 4338 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4339 4340 WPI_RXON_UNLOCK(sc); 4341 4342 return error; 4343 } 4344 4345 static int 4346 wpi_config_beacon(struct wpi_vap *wvp) 4347 { 4348 struct ieee80211vap *vap = &wvp->wv_vap; 4349 struct ieee80211com *ic = vap->iv_ic; 4350 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4351 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4352 struct wpi_softc *sc = ic->ic_softc; 4353 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4354 struct ieee80211_tim_ie *tie; 4355 struct mbuf *m; 4356 uint8_t *ptr; 4357 int error; 4358 4359 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4360 4361 WPI_VAP_LOCK_ASSERT(wvp); 4362 4363 cmd->len = htole16(bcn->m->m_pkthdr.len); 4364 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4365 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4366 4367 /* XXX seems to be unused */ 4368 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4369 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4370 ptr = mtod(bcn->m, uint8_t *); 4371 4372 cmd->tim = htole16(bo->bo_tim - ptr); 4373 cmd->timsz = tie->tim_len; 4374 } 4375 4376 /* Necessary for recursion in ieee80211_beacon_update(). */ 4377 m = bcn->m; 4378 bcn->m = m_dup(m, M_NOWAIT); 4379 if (bcn->m == NULL) { 4380 device_printf(sc->sc_dev, 4381 "%s: could not copy beacon frame\n", __func__); 4382 error = ENOMEM; 4383 goto end; 4384 } 4385 4386 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4387 device_printf(sc->sc_dev, 4388 "%s: could not update beacon frame, error %d", __func__, 4389 error); 4390 m_freem(bcn->m); 4391 } 4392 4393 /* Restore mbuf. 
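* The duplicate handed to wpi_cmd2() is owned by the TX path now; put the original back so later beacon updates keep working.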
*/ 4394 end: bcn->m = m; 4395 4396 return error; 4397 } 4398 4399 static int 4400 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4401 { 4402 struct ieee80211vap *vap = ni->ni_vap; 4403 struct wpi_vap *wvp = WPI_VAP(vap); 4404 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4405 struct mbuf *m; 4406 int error; 4407 4408 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4409 4410 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4411 return EINVAL; 4412 4413 m = ieee80211_beacon_alloc(ni); 4414 if (m == NULL) { 4415 device_printf(sc->sc_dev, 4416 "%s: could not allocate beacon frame\n", __func__); 4417 return ENOMEM; 4418 } 4419 4420 WPI_VAP_LOCK(wvp); 4421 if (bcn->m != NULL) 4422 m_freem(bcn->m); 4423 4424 bcn->m = m; 4425 4426 error = wpi_config_beacon(wvp); 4427 WPI_VAP_UNLOCK(wvp); 4428 4429 return error; 4430 } 4431 4432 static void 4433 wpi_update_beacon(struct ieee80211vap *vap, int item) 4434 { 4435 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4436 struct wpi_vap *wvp = WPI_VAP(vap); 4437 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4438 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4439 struct ieee80211_node *ni = vap->iv_bss; 4440 int mcast = 0; 4441 4442 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4443 4444 WPI_VAP_LOCK(wvp); 4445 if (bcn->m == NULL) { 4446 bcn->m = ieee80211_beacon_alloc(ni); 4447 if (bcn->m == NULL) { 4448 device_printf(sc->sc_dev, 4449 "%s: could not allocate beacon frame\n", __func__); 4450 4451 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4452 __func__); 4453 4454 WPI_VAP_UNLOCK(wvp); 4455 return; 4456 } 4457 } 4458 WPI_VAP_UNLOCK(wvp); 4459 4460 if (item == IEEE80211_BEACON_TIM) 4461 mcast = 1; /* TODO */ 4462 4463 setbit(bo->bo_flags, item); 4464 ieee80211_beacon_update(ni, bcn->m, mcast); 4465 4466 WPI_VAP_LOCK(wvp); 4467 wpi_config_beacon(wvp); 4468 WPI_VAP_UNLOCK(wvp); 4469 4470 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4471 } 4472 4473 static void 4474 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4475 { 4476 struct ieee80211vap *vap = ni->ni_vap; 4477 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4478 struct wpi_node *wn = WPI_NODE(ni); 4479 int error; 4480 4481 WPI_NT_LOCK(sc); 4482 4483 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4484 4485 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4486 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4487 device_printf(sc->sc_dev, 4488 "%s: could not add IBSS node, error %d\n", 4489 __func__, error); 4490 } 4491 } 4492 WPI_NT_UNLOCK(sc); 4493 } 4494 4495 static int 4496 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4497 { 4498 struct ieee80211com *ic = vap->iv_ic; 4499 struct ieee80211_node *ni = vap->iv_bss; 4500 struct ieee80211_channel *c = ni->ni_chan; 4501 int error; 4502 4503 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4504 4505 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4506 /* Link LED blinks while monitoring. */ 4507 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4508 return 0; 4509 } 4510 4511 /* XXX kernel panic workaround */ 4512 if (c == IEEE80211_CHAN_ANYC) { 4513 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4514 __func__); 4515 return EINVAL; 4516 } 4517 4518 if ((error = wpi_set_timing(sc, ni)) != 0) { 4519 device_printf(sc->sc_dev, 4520 "%s: could not set timing, error %d\n", __func__, error); 4521 return error; 4522 } 4523 4524 /* Update adapter configuration. 
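* Same settings as in wpi_auth(), plus the association ID and the BSS filter bit.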
*/ 4525 WPI_RXON_LOCK(sc); 4526 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4527 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4528 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4529 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4530 if (IEEE80211_IS_CHAN_2GHZ(c)) 4531 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4532 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4533 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4534 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4535 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4536 if (IEEE80211_IS_CHAN_A(c)) { 4537 sc->rxon.cck_mask = 0; 4538 sc->rxon.ofdm_mask = 0x15; 4539 } else if (IEEE80211_IS_CHAN_B(c)) { 4540 sc->rxon.cck_mask = 0x03; 4541 sc->rxon.ofdm_mask = 0; 4542 } else { 4543 /* Assume 802.11b/g. */ 4544 sc->rxon.cck_mask = 0x0f; 4545 sc->rxon.ofdm_mask = 0x15; 4546 } 4547 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4548 4549 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4550 sc->rxon.chan, sc->rxon.flags); 4551 4552 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4553 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4554 __func__); 4555 return error; 4556 } 4557 4558 /* Start periodic calibration timer. */ 4559 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4560 4561 WPI_RXON_UNLOCK(sc); 4562 4563 if (vap->iv_opmode == IEEE80211_M_IBSS || 4564 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4565 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4566 device_printf(sc->sc_dev, 4567 "%s: could not setup beacon, error %d\n", __func__, 4568 error); 4569 return error; 4570 } 4571 } 4572 4573 if (vap->iv_opmode == IEEE80211_M_STA) { 4574 /* Add BSS node. */ 4575 WPI_NT_LOCK(sc); 4576 error = wpi_add_sta_node(sc, ni); 4577 WPI_NT_UNLOCK(sc); 4578 if (error != 0) { 4579 device_printf(sc->sc_dev, 4580 "%s: could not add BSS node, error %d\n", __func__, 4581 error); 4582 return error; 4583 } 4584 } 4585 4586 /* Link LED always on while associated. */ 4587 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4588 4589 /* Enable power-saving mode if requested by user. 
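* Level 3 is used as a middle-ground setting; power saving stays off in IBSS mode.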
*/ 4590 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4591 vap->iv_opmode != IEEE80211_M_IBSS) 4592 (void)wpi_set_pslevel(sc, 0, 3, 1); 4593 4594 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4595 4596 return 0; 4597 } 4598 4599 static int 4600 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4601 { 4602 const struct ieee80211_cipher *cip = k->wk_cipher; 4603 struct ieee80211vap *vap = ni->ni_vap; 4604 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4605 struct wpi_node *wn = WPI_NODE(ni); 4606 struct wpi_node_info node; 4607 uint16_t kflags; 4608 int error; 4609 4610 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4611 4612 if (wpi_check_node_entry(sc, wn->id) == 0) { 4613 device_printf(sc->sc_dev, "%s: node does not exist\n", 4614 __func__); 4615 return 0; 4616 } 4617 4618 switch (cip->ic_cipher) { 4619 case IEEE80211_CIPHER_AES_CCM: 4620 kflags = WPI_KFLAG_CCMP; 4621 break; 4622 4623 default: 4624 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4625 cip->ic_cipher); 4626 return 0; 4627 } 4628 4629 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4630 if (k->wk_flags & IEEE80211_KEY_GROUP) 4631 kflags |= WPI_KFLAG_MULTICAST; 4632 4633 memset(&node, 0, sizeof node); 4634 node.id = wn->id; 4635 node.control = WPI_NODE_UPDATE; 4636 node.flags = WPI_FLAG_KEY_SET; 4637 node.kflags = htole16(kflags); 4638 memcpy(node.key, k->wk_key, k->wk_keylen); 4639 again: 4640 DPRINTF(sc, WPI_DEBUG_KEY, 4641 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4642 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4643 node.id, ether_sprintf(ni->ni_macaddr)); 4644 4645 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4646 if (error != 0) { 4647 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4648 error); 4649 return !error; 4650 } 4651 4652 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4653 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4654 kflags |= WPI_KFLAG_MULTICAST; 4655 node.kflags = htole16(kflags); 4656 4657 goto again; 4658 } 4659 4660 return 1; 4661 } 4662 4663 static void 4664 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4665 { 4666 const struct ieee80211_key *k = arg; 4667 struct ieee80211vap *vap = ni->ni_vap; 4668 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4669 struct wpi_node *wn = WPI_NODE(ni); 4670 int error; 4671 4672 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4673 return; 4674 4675 WPI_NT_LOCK(sc); 4676 error = wpi_load_key(ni, k); 4677 WPI_NT_UNLOCK(sc); 4678 4679 if (error == 0) { 4680 device_printf(sc->sc_dev, "%s: error while setting key\n", 4681 __func__); 4682 } 4683 } 4684 4685 static int 4686 wpi_set_global_keys(struct ieee80211_node *ni) 4687 { 4688 struct ieee80211vap *vap = ni->ni_vap; 4689 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4690 int error = 1; 4691 4692 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4693 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4694 error = wpi_load_key(ni, wk); 4695 4696 return !error; 4697 } 4698 4699 static int 4700 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4701 { 4702 struct ieee80211vap *vap = ni->ni_vap; 4703 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4704 struct wpi_node *wn = WPI_NODE(ni); 4705 struct wpi_node_info node; 4706 uint16_t kflags; 4707 int error; 4708 4709 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4710 4711 if (wpi_check_node_entry(sc, wn->id) == 0) { 4712 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4713 return 1; /* 
Nothing to do. */ 4714 } 4715 4716 kflags = WPI_KFLAG_KID(k->wk_keyix); 4717 if (k->wk_flags & IEEE80211_KEY_GROUP) 4718 kflags |= WPI_KFLAG_MULTICAST; 4719 4720 memset(&node, 0, sizeof node); 4721 node.id = wn->id; 4722 node.control = WPI_NODE_UPDATE; 4723 node.flags = WPI_FLAG_KEY_SET; 4724 node.kflags = htole16(kflags); 4725 again: 4726 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4727 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4728 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4729 4730 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4731 if (error != 0) { 4732 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4733 error); 4734 return !error; 4735 } 4736 4737 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4738 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4739 kflags |= WPI_KFLAG_MULTICAST; 4740 node.kflags = htole16(kflags); 4741 4742 goto again; 4743 } 4744 4745 return 1; 4746 } 4747 4748 static void 4749 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4750 { 4751 const struct ieee80211_key *k = arg; 4752 struct ieee80211vap *vap = ni->ni_vap; 4753 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4754 struct wpi_node *wn = WPI_NODE(ni); 4755 int error; 4756 4757 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4758 return; 4759 4760 WPI_NT_LOCK(sc); 4761 error = wpi_del_key(ni, k); 4762 WPI_NT_UNLOCK(sc); 4763 4764 if (error == 0) { 4765 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4766 __func__); 4767 } 4768 } 4769 4770 static int 4771 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4772 int set) 4773 { 4774 struct ieee80211com *ic = vap->iv_ic; 4775 struct wpi_softc *sc = ic->ic_softc; 4776 struct wpi_vap *wvp = WPI_VAP(vap); 4777 struct ieee80211_node *ni; 4778 int error, ni_ref = 0; 4779 4780 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4781 4782 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4783 /* Not for us. */ 4784 return 1; 4785 } 4786 4787 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4788 /* XMIT keys are handled in wpi_tx_data(). */ 4789 return 1; 4790 } 4791 4792 /* Handle group keys. */ 4793 if (&vap->iv_nw_keys[0] <= k && 4794 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4795 WPI_NT_LOCK(sc); 4796 if (set) 4797 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4798 else 4799 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4800 WPI_NT_UNLOCK(sc); 4801 4802 if (vap->iv_state == IEEE80211_S_RUN) { 4803 ieee80211_iterate_nodes(&ic->ic_sta, 4804 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4805 __DECONST(void *, k)); 4806 } 4807 4808 return 1; 4809 } 4810 4811 switch (vap->iv_opmode) { 4812 case IEEE80211_M_STA: 4813 ni = vap->iv_bss; 4814 break; 4815 4816 case IEEE80211_M_IBSS: 4817 case IEEE80211_M_AHDEMO: 4818 case IEEE80211_M_HOSTAP: 4819 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4820 if (ni == NULL) 4821 return 0; /* should not happen */ 4822 4823 ni_ref = 1; 4824 break; 4825 4826 default: 4827 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4828 vap->iv_opmode); 4829 return 0; 4830 } 4831 4832 WPI_NT_LOCK(sc); 4833 if (set) 4834 error = wpi_load_key(ni, k); 4835 else 4836 error = wpi_del_key(ni, k); 4837 WPI_NT_UNLOCK(sc); 4838 4839 if (ni_ref) 4840 ieee80211_node_decref(ni); 4841 4842 return error; 4843 } 4844 4845 static int 4846 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4847 { 4848 return wpi_process_key(vap, k, 1); 4849 } 4850 4851 static int 4852 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4853 { 4854 return wpi_process_key(vap, k, 0); 4855 } 4856 4857 /* 4858 * This function is called after the runtime firmware notifies us of its 4859 * readiness (called in a process context). 4860 */ 4861 static int 4862 wpi_post_alive(struct wpi_softc *sc) 4863 { 4864 int ntries, error; 4865 4866 /* Check (again) that the radio is not disabled. */ 4867 if ((error = wpi_nic_lock(sc)) != 0) 4868 return error; 4869 4870 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4871 4872 /* NB: Runtime firmware must be up and running. */ 4873 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4874 device_printf(sc->sc_dev, 4875 "RF switch: radio disabled (%s)\n", __func__); 4876 wpi_nic_unlock(sc); 4877 return EPERM; /* :-) */ 4878 } 4879 wpi_nic_unlock(sc); 4880 4881 /* Wait for thermal sensor to calibrate. */ 4882 for (ntries = 0; ntries < 1000; ntries++) { 4883 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4884 break; 4885 DELAY(10); 4886 } 4887 4888 if (ntries == 1000) { 4889 device_printf(sc->sc_dev, 4890 "timeout waiting for thermal sensor calibration\n"); 4891 return ETIMEDOUT; 4892 } 4893 4894 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4895 return 0; 4896 } 4897 4898 /* 4899 * The firmware boot code is small and is intended to be copied directly into 4900 * the NIC internal memory (no DMA transfer). 4901 */ 4902 static int 4903 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) 4904 { 4905 int error, ntries; 4906 4907 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4908 4909 size /= sizeof (uint32_t); 4910 4911 if ((error = wpi_nic_lock(sc)) != 0) 4912 return error; 4913 4914 /* Copy microcode image into NIC memory. */ 4915 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4916 (const uint32_t *)ucode, size); 4917 4918 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4919 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4920 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4921 4922 /* Start boot load now. */ 4923 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4924 4925 /* Wait for transfer to complete. */ 4926 for (ntries = 0; ntries < 1000; ntries++) { 4927 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4928 DPRINTF(sc, WPI_DEBUG_HW, 4929 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4930 WPI_FH_TX_STATUS_IDLE(6), 4931 status & WPI_FH_TX_STATUS_IDLE(6)); 4932 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4933 DPRINTF(sc, WPI_DEBUG_HW, 4934 "Status Match! 
- ntries = %d\n", ntries); 4935 break; 4936 } 4937 DELAY(10); 4938 } 4939 if (ntries == 1000) { 4940 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4941 __func__); 4942 wpi_nic_unlock(sc); 4943 return ETIMEDOUT; 4944 } 4945 4946 /* Enable boot after power up. */ 4947 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4948 4949 wpi_nic_unlock(sc); 4950 return 0; 4951 } 4952 4953 static int 4954 wpi_load_firmware(struct wpi_softc *sc) 4955 { 4956 struct wpi_fw_info *fw = &sc->fw; 4957 struct wpi_dma_info *dma = &sc->fw_dma; 4958 int error; 4959 4960 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4961 4962 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4963 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4964 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4965 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4966 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4967 4968 /* Tell adapter where to find initialization sections. */ 4969 if ((error = wpi_nic_lock(sc)) != 0) 4970 return error; 4971 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4972 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4973 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4974 dma->paddr + WPI_FW_DATA_MAXSZ); 4975 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4976 wpi_nic_unlock(sc); 4977 4978 /* Load firmware boot code. */ 4979 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4980 if (error != 0) { 4981 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4982 __func__); 4983 return error; 4984 } 4985 4986 /* Now press "execute". */ 4987 WPI_WRITE(sc, WPI_RESET, 0); 4988 4989 /* Wait at most one second for first alive notification. */ 4990 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4991 device_printf(sc->sc_dev, 4992 "%s: timeout waiting for adapter to initialize, error %d\n", 4993 __func__, error); 4994 return error; 4995 } 4996 4997 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4998 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4999 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 5000 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 5001 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 5002 5003 /* Tell adapter where to find runtime sections. 
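* Unlike the init image, the runtime text size is OR'ed with WPI_FW_UPDATED so the BSM can reload this image on subsequent restarts.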
*/ 5004 if ((error = wpi_nic_lock(sc)) != 0) 5005 return error; 5006 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 5007 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5008 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 5009 dma->paddr + WPI_FW_DATA_MAXSZ); 5010 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 5011 WPI_FW_UPDATED | fw->main.textsz); 5012 wpi_nic_unlock(sc); 5013 5014 return 0; 5015 } 5016 5017 static int 5018 wpi_read_firmware(struct wpi_softc *sc) 5019 { 5020 const struct firmware *fp; 5021 struct wpi_fw_info *fw = &sc->fw; 5022 const struct wpi_firmware_hdr *hdr; 5023 int error; 5024 5025 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5026 5027 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5028 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5029 5030 WPI_UNLOCK(sc); 5031 fp = firmware_get(WPI_FW_NAME); 5032 WPI_LOCK(sc); 5033 5034 if (fp == NULL) { 5035 device_printf(sc->sc_dev, 5036 "could not load firmware image '%s'\n", WPI_FW_NAME); 5037 return EINVAL; 5038 } 5039 5040 sc->fw_fp = fp; 5041 5042 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5043 device_printf(sc->sc_dev, 5044 "firmware file too short: %zu bytes\n", fp->datasize); 5045 error = EINVAL; 5046 goto fail; 5047 } 5048 5049 fw->size = fp->datasize; 5050 fw->data = (const uint8_t *)fp->data; 5051 5052 /* Extract firmware header information. */ 5053 hdr = (const struct wpi_firmware_hdr *)fw->data; 5054 5055 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5056 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5057 5058 fw->main.textsz = le32toh(hdr->rtextsz); 5059 fw->main.datasz = le32toh(hdr->rdatasz); 5060 fw->init.textsz = le32toh(hdr->itextsz); 5061 fw->init.datasz = le32toh(hdr->idatasz); 5062 fw->boot.textsz = le32toh(hdr->btextsz); 5063 fw->boot.datasz = 0; 5064 5065 /* Sanity-check firmware header. */ 5066 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5067 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5068 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5069 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5070 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5071 (fw->boot.textsz & 3) != 0) { 5072 device_printf(sc->sc_dev, "invalid firmware header\n"); 5073 error = EINVAL; 5074 goto fail; 5075 } 5076 5077 /* Check that all firmware sections fit. */ 5078 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5079 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5080 device_printf(sc->sc_dev, 5081 "firmware file too short: %zu bytes\n", fw->size); 5082 error = EINVAL; 5083 goto fail; 5084 } 5085 5086 /* Get pointers to firmware sections. 
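* The sections follow the header back to back, in the order shown in the layout diagram above.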
*/ 5087 fw->main.text = (const uint8_t *)(hdr + 1); 5088 fw->main.data = fw->main.text + fw->main.textsz; 5089 fw->init.text = fw->main.data + fw->main.datasz; 5090 fw->init.data = fw->init.text + fw->init.textsz; 5091 fw->boot.text = fw->init.data + fw->init.datasz; 5092 5093 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5094 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5095 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5096 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5097 fw->main.textsz, fw->main.datasz, 5098 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5099 5100 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5101 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5102 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5103 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5104 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5105 5106 return 0; 5107 5108 fail: wpi_unload_firmware(sc); 5109 return error; 5110 } 5111 5112 /** 5113 * Free the referenced firmware image 5114 */ 5115 static void 5116 wpi_unload_firmware(struct wpi_softc *sc) 5117 { 5118 if (sc->fw_fp != NULL) { 5119 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5120 sc->fw_fp = NULL; 5121 } 5122 } 5123 5124 static int 5125 wpi_clock_wait(struct wpi_softc *sc) 5126 { 5127 int ntries; 5128 5129 /* Set "initialization complete" bit. */ 5130 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5131 5132 /* Wait for clock stabilization. */ 5133 for (ntries = 0; ntries < 2500; ntries++) { 5134 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5135 return 0; 5136 DELAY(100); 5137 } 5138 device_printf(sc->sc_dev, 5139 "%s: timeout waiting for clock stabilization\n", __func__); 5140 5141 return ETIMEDOUT; 5142 } 5143 5144 static int 5145 wpi_apm_init(struct wpi_softc *sc) 5146 { 5147 uint32_t reg; 5148 int error; 5149 5150 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5151 5152 /* Disable L0s exit timer (NMI bug workaround). */ 5153 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5154 /* Don't wait for ICH L0s (ICH bug workaround). */ 5155 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5156 5157 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5158 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5159 5160 /* Retrieve PCIe Active State Power Management (ASPM). */ 5161 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 5162 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5163 if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ 5164 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5165 else 5166 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5167 5168 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5169 5170 /* Wait for clock stabilization before accessing prph. */ 5171 if ((error = wpi_clock_wait(sc)) != 0) 5172 return error; 5173 5174 if ((error = wpi_nic_lock(sc)) != 0) 5175 return error; 5176 /* Cleanup. */ 5177 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5178 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5179 5180 /* Enable DMA and BSM (Bootstrap State Machine). */ 5181 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5182 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5183 DELAY(20); 5184 /* Disable L1-Active. 
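* (presumably to keep the PCIe link from entering L1 while the adapter is active)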
*/ 5185 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5186 wpi_nic_unlock(sc); 5187 5188 return 0; 5189 } 5190 5191 static void 5192 wpi_apm_stop_master(struct wpi_softc *sc) 5193 { 5194 int ntries; 5195 5196 /* Stop busmaster DMA activity. */ 5197 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5198 5199 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5200 WPI_GP_CNTRL_MAC_PS) 5201 return; /* Already asleep. */ 5202 5203 for (ntries = 0; ntries < 100; ntries++) { 5204 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5205 return; 5206 DELAY(10); 5207 } 5208 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5209 __func__); 5210 } 5211 5212 static void 5213 wpi_apm_stop(struct wpi_softc *sc) 5214 { 5215 wpi_apm_stop_master(sc); 5216 5217 /* Reset the entire device. */ 5218 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5219 DELAY(10); 5220 /* Clear "initialization complete" bit. */ 5221 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5222 } 5223 5224 static void 5225 wpi_nic_config(struct wpi_softc *sc) 5226 { 5227 uint32_t rev; 5228 5229 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5230 5231 /* voodoo from the Linux "driver".. */ 5232 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5233 if ((rev & 0xc0) == 0x40) 5234 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5235 else if (!(rev & 0x80)) 5236 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5237 5238 if (sc->cap == 0x80) 5239 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5240 5241 if ((sc->rev & 0xf0) == 0xd0) 5242 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5243 else 5244 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5245 5246 if (sc->type > 1) 5247 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5248 } 5249 5250 static int 5251 wpi_hw_init(struct wpi_softc *sc) 5252 { 5253 uint8_t chnl; 5254 int ntries, error; 5255 5256 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5257 5258 /* Clear pending interrupts. */ 5259 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5260 5261 if ((error = wpi_apm_init(sc)) != 0) { 5262 device_printf(sc->sc_dev, 5263 "%s: could not power ON adapter, error %d\n", __func__, 5264 error); 5265 return error; 5266 } 5267 5268 /* Select VMAIN power source. */ 5269 if ((error = wpi_nic_lock(sc)) != 0) 5270 return error; 5271 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5272 wpi_nic_unlock(sc); 5273 /* Spin until VMAIN gets selected. */ 5274 for (ntries = 0; ntries < 5000; ntries++) { 5275 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5276 break; 5277 DELAY(10); 5278 } 5279 if (ntries == 5000) { 5280 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5281 return ETIMEDOUT; 5282 } 5283 5284 /* Perform adapter initialization. */ 5285 wpi_nic_config(sc); 5286 5287 /* Initialize RX ring. */ 5288 if ((error = wpi_nic_lock(sc)) != 0) 5289 return error; 5290 /* Set physical address of RX ring. */ 5291 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5292 /* Set physical address of RX read pointer. */ 5293 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5294 offsetof(struct wpi_shared, next)); 5295 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5296 /* Enable RX. 
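* The config word enables the DMA engine, sets the ring size (log2 of the RBD count) and routes completion interrupts to the host.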
*/ 5297 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5298 WPI_FH_RX_CONFIG_DMA_ENA | 5299 WPI_FH_RX_CONFIG_RDRBD_ENA | 5300 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5301 WPI_FH_RX_CONFIG_MAXFRAG | 5302 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5303 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5304 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5305 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5306 wpi_nic_unlock(sc); 5307 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5308 5309 /* Initialize TX rings. */ 5310 if ((error = wpi_nic_lock(sc)) != 0) 5311 return error; 5312 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5313 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5314 /* Enable all 6 TX rings. */ 5315 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5316 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5317 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5318 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5319 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5320 /* Set physical address of TX rings. */ 5321 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5322 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5323 5324 /* Enable all DMA channels. */ 5325 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5326 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5327 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5328 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5329 } 5330 wpi_nic_unlock(sc); 5331 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5332 5333 /* Clear "radio off" and "commands blocked" bits. */ 5334 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5335 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5336 5337 /* Clear pending interrupts. */ 5338 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5339 /* Enable interrupts. */ 5340 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5341 5342 /* _Really_ make sure "radio off" bit is cleared! */ 5343 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5344 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5345 5346 if ((error = wpi_load_firmware(sc)) != 0) { 5347 device_printf(sc->sc_dev, 5348 "%s: could not load firmware, error %d\n", __func__, 5349 error); 5350 return error; 5351 } 5352 /* Wait at most one second for firmware alive notification. */ 5353 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5354 device_printf(sc->sc_dev, 5355 "%s: timeout waiting for adapter to initialize, error %d\n", 5356 __func__, error); 5357 return error; 5358 } 5359 5360 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5361 5362 /* Do post-firmware initialization. */ 5363 return wpi_post_alive(sc); 5364 } 5365 5366 static void 5367 wpi_hw_stop(struct wpi_softc *sc) 5368 { 5369 uint8_t chnl, qid; 5370 int ntries; 5371 5372 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5373 5374 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5375 wpi_nic_lock(sc); 5376 5377 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5378 5379 /* Disable interrupts. */ 5380 WPI_WRITE(sc, WPI_INT_MASK, 0); 5381 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5382 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5383 5384 /* Make sure we no longer hold the NIC lock. */ 5385 wpi_nic_unlock(sc); 5386 5387 if (wpi_nic_lock(sc) == 0) { 5388 /* Stop TX scheduler. */ 5389 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5390 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5391 5392 /* Stop all DMA channels. 
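* Give each channel up to 2ms (200 x 10us) to report idle before moving on.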
*/ 5393 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5394 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5395 for (ntries = 0; ntries < 200; ntries++) { 5396 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5397 WPI_FH_TX_STATUS_IDLE(chnl)) 5398 break; 5399 DELAY(10); 5400 } 5401 } 5402 wpi_nic_unlock(sc); 5403 } 5404 5405 /* Stop RX ring. */ 5406 wpi_reset_rx_ring(sc); 5407 5408 /* Reset all TX rings. */ 5409 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 5410 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5411 5412 if (wpi_nic_lock(sc) == 0) { 5413 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5414 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5415 wpi_nic_unlock(sc); 5416 } 5417 DELAY(5); 5418 /* Power OFF adapter. */ 5419 wpi_apm_stop(sc); 5420 } 5421 5422 static void 5423 wpi_radio_on(void *arg0, int pending) 5424 { 5425 struct wpi_softc *sc = arg0; 5426 struct ieee80211com *ic = &sc->sc_ic; 5427 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5428 5429 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5430 5431 WPI_LOCK(sc); 5432 callout_stop(&sc->watchdog_rfkill); 5433 WPI_UNLOCK(sc); 5434 5435 if (vap != NULL) 5436 ieee80211_init(vap); 5437 } 5438 5439 static void 5440 wpi_radio_off(void *arg0, int pending) 5441 { 5442 struct wpi_softc *sc = arg0; 5443 struct ieee80211com *ic = &sc->sc_ic; 5444 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5445 5446 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5447 5448 ieee80211_notify_radio(ic, 0); 5449 wpi_stop(sc); 5450 if (vap != NULL) 5451 ieee80211_stop(vap); 5452 5453 WPI_LOCK(sc); 5454 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5455 WPI_UNLOCK(sc); 5456 } 5457 5458 static int 5459 wpi_init(struct wpi_softc *sc) 5460 { 5461 int error = 0; 5462 5463 WPI_LOCK(sc); 5464 5465 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5466 5467 if (sc->sc_running != 0) 5468 goto end; 5469 5470 /* Check that the radio is not disabled by hardware switch. */ 5471 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5472 device_printf(sc->sc_dev, 5473 "RF switch: radio disabled (%s)\n", __func__); 5474 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5475 sc); 5476 error = EINPROGRESS; 5477 goto end; 5478 } 5479 5480 /* Read firmware images from the filesystem. */ 5481 if ((error = wpi_read_firmware(sc)) != 0) { 5482 device_printf(sc->sc_dev, 5483 "%s: could not read firmware, error %d\n", __func__, 5484 error); 5485 goto end; 5486 } 5487 5488 sc->sc_running = 1; 5489 5490 /* Initialize hardware and upload firmware. */ 5491 error = wpi_hw_init(sc); 5492 wpi_unload_firmware(sc); 5493 if (error != 0) { 5494 device_printf(sc->sc_dev, 5495 "%s: could not initialize hardware, error %d\n", __func__, 5496 error); 5497 goto fail; 5498 } 5499 5500 /* Configure adapter now that it is ready. 
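* wpi_config() sends the power, bluetooth coexistence and initial RXON commands and sets up MRR.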
*/ 5501 if ((error = wpi_config(sc)) != 0) { 5502 device_printf(sc->sc_dev, 5503 "%s: could not configure device, error %d\n", __func__, 5504 error); 5505 goto fail; 5506 } 5507 5508 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5509 5510 WPI_UNLOCK(sc); 5511 5512 return 0; 5513 5514 fail: wpi_stop_locked(sc); 5515 5516 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5517 WPI_UNLOCK(sc); 5518 5519 return error; 5520 } 5521 5522 static void 5523 wpi_stop_locked(struct wpi_softc *sc) 5524 { 5525 5526 WPI_LOCK_ASSERT(sc); 5527 5528 if (sc->sc_running == 0) 5529 return; 5530 5531 WPI_TX_LOCK(sc); 5532 WPI_TXQ_LOCK(sc); 5533 sc->sc_running = 0; 5534 WPI_TXQ_UNLOCK(sc); 5535 WPI_TX_UNLOCK(sc); 5536 5537 WPI_TXQ_STATE_LOCK(sc); 5538 callout_stop(&sc->tx_timeout); 5539 WPI_TXQ_STATE_UNLOCK(sc); 5540 5541 WPI_RXON_LOCK(sc); 5542 callout_stop(&sc->scan_timeout); 5543 callout_stop(&sc->calib_to); 5544 WPI_RXON_UNLOCK(sc); 5545 5546 /* Power OFF hardware. */ 5547 wpi_hw_stop(sc); 5548 } 5549 5550 static void 5551 wpi_stop(struct wpi_softc *sc) 5552 { 5553 WPI_LOCK(sc); 5554 wpi_stop_locked(sc); 5555 WPI_UNLOCK(sc); 5556 } 5557 5558 /* 5559 * Callback from net80211 to start a scan. 5560 */ 5561 static void 5562 wpi_scan_start(struct ieee80211com *ic) 5563 { 5564 struct wpi_softc *sc = ic->ic_softc; 5565 5566 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5567 } 5568 5569 /* 5570 * Callback from net80211 to terminate a scan. 5571 */ 5572 static void 5573 wpi_scan_end(struct ieee80211com *ic) 5574 { 5575 struct wpi_softc *sc = ic->ic_softc; 5576 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5577 5578 if (vap->iv_state == IEEE80211_S_RUN) 5579 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5580 } 5581 5582 /** 5583 * Called by the net80211 framework to indicate to the driver 5584 * that the channel should be changed 5585 */ 5586 static void 5587 wpi_set_channel(struct ieee80211com *ic) 5588 { 5589 const struct ieee80211_channel *c = ic->ic_curchan; 5590 struct wpi_softc *sc = ic->ic_softc; 5591 int error; 5592 5593 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5594 5595 WPI_LOCK(sc); 5596 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5597 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5598 WPI_UNLOCK(sc); 5599 WPI_TX_LOCK(sc); 5600 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5601 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5602 WPI_TX_UNLOCK(sc); 5603 5604 /* 5605 * Only need to set the channel in Monitor mode. AP scanning and auth 5606 * are already taken care of by their respective firmware commands. 5607 */ 5608 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5609 WPI_RXON_LOCK(sc); 5610 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5611 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5612 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5613 WPI_RXON_24GHZ); 5614 } else { 5615 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5616 WPI_RXON_24GHZ); 5617 } 5618 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5619 device_printf(sc->sc_dev, 5620 "%s: error %d setting channel\n", __func__, 5621 error); 5622 WPI_RXON_UNLOCK(sc); 5623 } 5624 } 5625 5626 /** 5627 * Called by net80211 to indicate that we need to scan the current 5628 * channel. The channel is previously be set via the wpi_set_channel 5629 * callback. 
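* The maxdwell hint from net80211 is not used here; the dwell times computed in wpi_scan() apply instead.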
5630 */ 5631 static void 5632 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5633 { 5634 struct ieee80211vap *vap = ss->ss_vap; 5635 struct ieee80211com *ic = vap->iv_ic; 5636 struct wpi_softc *sc = ic->ic_softc; 5637 int error; 5638 5639 WPI_RXON_LOCK(sc); 5640 error = wpi_scan(sc, ic->ic_curchan); 5641 WPI_RXON_UNLOCK(sc); 5642 if (error != 0) 5643 ieee80211_cancel_scan(vap); 5644 } 5645 5646 /** 5647 * Called by the net80211 framework to indicate 5648 * the minimum dwell time has been met, terminate the scan. 5649 * We don't actually terminate the scan as the firmware will notify 5650 * us when it's finished and we have no way to interrupt it. 5651 */ 5652 static void 5653 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5654 { 5655 /* NB: don't try to abort scan; wait for firmware to finish */ 5656 } 5657 5658 static void 5659 wpi_hw_reset(void *arg, int pending) 5660 { 5661 struct wpi_softc *sc = arg; 5662 struct ieee80211com *ic = &sc->sc_ic; 5663 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5664 5665 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5666 5667 ieee80211_notify_radio(ic, 0); 5668 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN)) 5669 ieee80211_cancel_scan(vap); 5670 5671 wpi_stop(sc); 5672 if (vap != NULL) { 5673 ieee80211_stop(vap); 5674 ieee80211_init(vap); 5675 } 5676 } 5677