/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 * Copyright (c) 2015 Andriy Voskoboinyk <avos@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware the way
 * many other adapters do.  Instead, at run time the device is brought into
 * a known state and told to load boot firmware.  The boot firmware loads an
 * init and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and the hardware communicate
 * through circular DMA rings reached by the firmware via the SRAM.
 *
 * There are six rings: one command ring, one rx data ring and four tx data
 * rings.  The four tx data rings allow for QoS prioritization.
 *
 * The rx data ring consists of 32 dma buffers.  Two registers indicate how
 * far into the ring the driver and the firmware have progressed: the driver
 * sets the initial read index (reg1) and the initial write index (reg2);
 * the firmware advances the read index (reg1) when it receives a packet and
 * fires an interrupt.  The driver then processes the buffers starting at
 * reg1 and tells the firmware which buffers have been consumed by updating
 * reg2, allocating a fresh buffer for each processed slot as it goes.
 *
 * The tx rings work similarly, except that the firmware stops processing
 * buffers once a queue is full and resumes only after confirmation of a
 * successful transmission (tx_done) has been received.
 *
 * The command ring operates in the same manner as the tx queues.
 *
 * All communication directly with the card (e.g. eeprom access) is classed
 * as Stage1 communication.
 *
 * All communication with the card via the firmware is classed as Stage2.
 * The firmware consists of two parts: a bootstrap firmware and a runtime
 * firmware.  Both are loaded from host memory via dma to the card, which is
 * then told to execute.  From this point on the majority of communication
 * between the driver and the card goes via the firmware.
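 *
 * As a rough illustration of the rx index handshake described above (a
 * sketch only, with made-up helper names; "reg1"/"reg2" stand in for the
 * real registers, which are driven by wpi_rx_done() and
 * wpi_update_rx_ring() below):
 *
 *	while (drv_idx != read_reg(reg1)) {        // firmware filled a slot
 *		hand_to_net80211(ring.buf[drv_idx]);
 *		ring.buf[drv_idx] = alloc_rx_buf();   // restock the slot
 *		drv_idx = (drv_idx + 1) % 32;
 *		write_reg(reg2, drv_idx);             // return slot to firmware
 *	}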
59 */ 60 61 #include "opt_wlan.h" 62 #include "opt_wpi.h" 63 64 #include <sys/param.h> 65 #include <sys/sysctl.h> 66 #include <sys/sockio.h> 67 #include <sys/mbuf.h> 68 #include <sys/kernel.h> 69 #include <sys/socket.h> 70 #include <sys/systm.h> 71 #include <sys/malloc.h> 72 #include <sys/queue.h> 73 #include <sys/taskqueue.h> 74 #include <sys/module.h> 75 #include <sys/bus.h> 76 #include <sys/endian.h> 77 #include <sys/linker.h> 78 #include <sys/firmware.h> 79 80 #include <machine/bus.h> 81 #include <machine/resource.h> 82 #include <sys/rman.h> 83 84 #include <dev/pci/pcireg.h> 85 #include <dev/pci/pcivar.h> 86 87 #include <net/bpf.h> 88 #include <net/if.h> 89 #include <net/if_var.h> 90 #include <net/if_arp.h> 91 #include <net/ethernet.h> 92 #include <net/if_dl.h> 93 #include <net/if_media.h> 94 #include <net/if_types.h> 95 96 #include <netinet/in.h> 97 #include <netinet/in_systm.h> 98 #include <netinet/in_var.h> 99 #include <netinet/if_ether.h> 100 #include <netinet/ip.h> 101 102 #include <net80211/ieee80211_var.h> 103 #include <net80211/ieee80211_radiotap.h> 104 #include <net80211/ieee80211_regdomain.h> 105 #include <net80211/ieee80211_ratectl.h> 106 107 #include <dev/wpi/if_wpireg.h> 108 #include <dev/wpi/if_wpivar.h> 109 #include <dev/wpi/if_wpi_debug.h> 110 111 struct wpi_ident { 112 uint16_t vendor; 113 uint16_t device; 114 uint16_t subdevice; 115 const char *name; 116 }; 117 118 static const struct wpi_ident wpi_ident_table[] = { 119 /* The below entries support ABG regardless of the subid */ 120 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 122 /* The below entries only support BG */ 123 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 127 { 0, 0, 0, NULL } 128 }; 129 130 static int wpi_probe(device_t); 131 static int wpi_attach(device_t); 132 static void wpi_radiotap_attach(struct wpi_softc *); 133 static void wpi_sysctlattach(struct wpi_softc *); 134 static void wpi_init_beacon(struct wpi_vap *); 135 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 136 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 137 const uint8_t [IEEE80211_ADDR_LEN], 138 const uint8_t [IEEE80211_ADDR_LEN]); 139 static void wpi_vap_delete(struct ieee80211vap *); 140 static int wpi_detach(device_t); 141 static int wpi_shutdown(device_t); 142 static int wpi_suspend(device_t); 143 static int wpi_resume(device_t); 144 static int wpi_nic_lock(struct wpi_softc *); 145 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 146 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 147 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 148 void **, bus_size_t, bus_size_t); 149 static void wpi_dma_contig_free(struct wpi_dma_info *); 150 static int wpi_alloc_shared(struct wpi_softc *); 151 static void wpi_free_shared(struct wpi_softc *); 152 static int wpi_alloc_fwmem(struct wpi_softc *); 153 static void wpi_free_fwmem(struct wpi_softc *); 154 static int wpi_alloc_rx_ring(struct wpi_softc *); 155 static void wpi_update_rx_ring(struct wpi_softc *); 156 static void wpi_update_rx_ring_ps(struct wpi_softc *); 157 static void wpi_reset_rx_ring(struct wpi_softc *); 158 static void wpi_free_rx_ring(struct wpi_softc *); 159 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 160 uint8_t); 161 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 162 static void wpi_update_tx_ring_ps(struct wpi_softc *, 163 struct wpi_tx_ring *); 164 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 166 static int wpi_read_eeprom(struct wpi_softc *, 167 uint8_t macaddr[IEEE80211_ADDR_LEN]); 168 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 169 static void wpi_read_eeprom_band(struct wpi_softc *, uint8_t, int, int *, 170 struct ieee80211_channel[]); 171 static int wpi_read_eeprom_channels(struct wpi_softc *, uint8_t); 172 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 173 struct ieee80211_channel *); 174 static void wpi_getradiocaps(struct ieee80211com *, int, int *, 175 struct ieee80211_channel[]); 176 static int wpi_setregdomain(struct ieee80211com *, 177 struct ieee80211_regdomain *, int, 178 struct ieee80211_channel[]); 179 static int wpi_read_eeprom_group(struct wpi_softc *, uint8_t); 180 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 181 const uint8_t mac[IEEE80211_ADDR_LEN]); 182 static void wpi_node_free(struct ieee80211_node *); 183 static void wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 184 const struct ieee80211_rx_stats *, 185 int, int); 186 static void wpi_restore_node(void *, struct ieee80211_node *); 187 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 188 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 189 static void wpi_calib_timeout(void *); 190 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 191 struct wpi_rx_data *); 192 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 193 struct wpi_rx_data *); 194 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 195 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 196 static void wpi_notif_intr(struct wpi_softc *); 197 static void wpi_wakeup_intr(struct wpi_softc *); 198 #ifdef WPI_DEBUG 199 static void wpi_debug_registers(struct wpi_softc *); 200 #endif 201 static void wpi_fatal_intr(struct wpi_softc *); 202 static void wpi_intr(void *); 203 static void wpi_free_txfrags(struct wpi_softc *, uint16_t); 204 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 205 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 206 struct ieee80211_node *); 207 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 208 struct ieee80211_node *, 209 const struct ieee80211_bpf_params *); 210 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 211 const struct ieee80211_bpf_params *); 212 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 213 static void wpi_watchdog_rfkill(void *); 214 static void wpi_scan_timeout(void *); 215 static void wpi_tx_timeout(void *); 216 static void wpi_parent(struct ieee80211com *); 217 static int wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t, 218 int); 219 static int wpi_mrr_setup(struct wpi_softc *); 220 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 221 static int wpi_add_broadcast_node(struct wpi_softc *, int); 222 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 223 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 224 static int wpi_updateedca(struct ieee80211com *); 225 static void wpi_set_promisc(struct wpi_softc *); 226 
static void wpi_update_promisc(struct ieee80211com *); 227 static void wpi_update_mcast(struct ieee80211com *); 228 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 229 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 230 static void wpi_power_calibration(struct wpi_softc *); 231 static int wpi_set_txpower(struct wpi_softc *, int); 232 static int wpi_get_power_index(struct wpi_softc *, 233 struct wpi_power_group *, uint8_t, int, int); 234 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 235 static int wpi_send_btcoex(struct wpi_softc *); 236 static int wpi_send_rxon(struct wpi_softc *, int, int); 237 static int wpi_config(struct wpi_softc *); 238 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 239 struct ieee80211_channel *, uint8_t); 240 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 241 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 242 struct ieee80211_channel *); 243 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 244 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 245 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 246 static int wpi_config_beacon(struct wpi_vap *); 247 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 248 static void wpi_update_beacon(struct ieee80211vap *, int); 249 static void wpi_newassoc(struct ieee80211_node *, int); 250 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 251 static int wpi_load_key(struct ieee80211_node *, 252 const struct ieee80211_key *); 253 static void wpi_load_key_cb(void *, struct ieee80211_node *); 254 static int wpi_set_global_keys(struct ieee80211_node *); 255 static int wpi_del_key(struct ieee80211_node *, 256 const struct ieee80211_key *); 257 static void wpi_del_key_cb(void *, struct ieee80211_node *); 258 static int wpi_process_key(struct ieee80211vap *, 259 const struct ieee80211_key *, int); 260 static int wpi_key_set(struct ieee80211vap *, 261 const struct ieee80211_key *); 262 static int wpi_key_delete(struct ieee80211vap *, 263 const struct ieee80211_key *); 264 static int wpi_post_alive(struct wpi_softc *); 265 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, 266 uint32_t); 267 static int wpi_load_firmware(struct wpi_softc *); 268 static int wpi_read_firmware(struct wpi_softc *); 269 static void wpi_unload_firmware(struct wpi_softc *); 270 static int wpi_clock_wait(struct wpi_softc *); 271 static int wpi_apm_init(struct wpi_softc *); 272 static void wpi_apm_stop_master(struct wpi_softc *); 273 static void wpi_apm_stop(struct wpi_softc *); 274 static void wpi_nic_config(struct wpi_softc *); 275 static int wpi_hw_init(struct wpi_softc *); 276 static void wpi_hw_stop(struct wpi_softc *); 277 static void wpi_radio_on(void *, int); 278 static void wpi_radio_off(void *, int); 279 static int wpi_init(struct wpi_softc *); 280 static void wpi_stop_locked(struct wpi_softc *); 281 static void wpi_stop(struct wpi_softc *); 282 static void wpi_scan_start(struct ieee80211com *); 283 static void wpi_scan_end(struct ieee80211com *); 284 static void wpi_set_channel(struct ieee80211com *); 285 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 286 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 287 288 static device_method_t wpi_methods[] = { 289 /* Device interface */ 290 DEVMETHOD(device_probe, wpi_probe), 291 DEVMETHOD(device_attach, wpi_attach), 292 DEVMETHOD(device_detach, wpi_detach), 293 
DEVMETHOD(device_shutdown, wpi_shutdown), 294 DEVMETHOD(device_suspend, wpi_suspend), 295 DEVMETHOD(device_resume, wpi_resume), 296 297 DEVMETHOD_END 298 }; 299 300 static driver_t wpi_driver = { 301 "wpi", 302 wpi_methods, 303 sizeof (struct wpi_softc) 304 }; 305 static devclass_t wpi_devclass; 306 307 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 308 309 MODULE_VERSION(wpi, 1); 310 311 MODULE_DEPEND(wpi, pci, 1, 1, 1); 312 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 313 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 314 315 static int 316 wpi_probe(device_t dev) 317 { 318 const struct wpi_ident *ident; 319 320 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 321 if (pci_get_vendor(dev) == ident->vendor && 322 pci_get_device(dev) == ident->device) { 323 device_set_desc(dev, ident->name); 324 return (BUS_PROBE_DEFAULT); 325 } 326 } 327 return ENXIO; 328 } 329 330 static int 331 wpi_attach(device_t dev) 332 { 333 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 334 struct ieee80211com *ic; 335 uint8_t i; 336 int error, rid; 337 #ifdef WPI_DEBUG 338 int supportsa = 1; 339 const struct wpi_ident *ident; 340 #endif 341 342 sc->sc_dev = dev; 343 344 #ifdef WPI_DEBUG 345 error = resource_int_value(device_get_name(sc->sc_dev), 346 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 347 if (error != 0) 348 sc->sc_debug = 0; 349 #else 350 sc->sc_debug = 0; 351 #endif 352 353 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 354 355 /* 356 * Get the offset of the PCI Express Capability Structure in PCI 357 * Configuration Space. 358 */ 359 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 360 if (error != 0) { 361 device_printf(dev, "PCIe capability structure not found!\n"); 362 return error; 363 } 364 365 /* 366 * Some card's only support 802.11b/g not a, check to see if 367 * this is one such card. A 0x0 in the subdevice table indicates 368 * the entire subdevice range is to be ignored. 369 */ 370 #ifdef WPI_DEBUG 371 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 372 if (ident->subdevice && 373 pci_get_subdevice(dev) == ident->subdevice) { 374 supportsa = 0; 375 break; 376 } 377 } 378 #endif 379 380 /* Clear device-specific "PCI retry timeout" register (41h). */ 381 pci_write_config(dev, 0x41, 0, 1); 382 383 /* Enable bus-mastering. */ 384 pci_enable_busmaster(dev); 385 386 rid = PCIR_BAR(0); 387 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 388 RF_ACTIVE); 389 if (sc->mem == NULL) { 390 device_printf(dev, "can't map mem space\n"); 391 return ENOMEM; 392 } 393 sc->sc_st = rman_get_bustag(sc->mem); 394 sc->sc_sh = rman_get_bushandle(sc->mem); 395 396 rid = 1; 397 if (pci_alloc_msi(dev, &rid) == 0) 398 rid = 1; 399 else 400 rid = 0; 401 /* Install interrupt handler. */ 402 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 403 (rid != 0 ? 0 : RF_SHAREABLE)); 404 if (sc->irq == NULL) { 405 device_printf(dev, "can't map interrupt\n"); 406 error = ENOMEM; 407 goto fail; 408 } 409 410 WPI_LOCK_INIT(sc); 411 WPI_TX_LOCK_INIT(sc); 412 WPI_RXON_LOCK_INIT(sc); 413 WPI_NT_LOCK_INIT(sc); 414 WPI_TXQ_LOCK_INIT(sc); 415 WPI_TXQ_STATE_LOCK_INIT(sc); 416 417 /* Allocate DMA memory for firmware transfers. */ 418 if ((error = wpi_alloc_fwmem(sc)) != 0) { 419 device_printf(dev, 420 "could not allocate memory for firmware, error %d\n", 421 error); 422 goto fail; 423 } 424 425 /* Allocate shared page. 
*/ 426 if ((error = wpi_alloc_shared(sc)) != 0) { 427 device_printf(dev, "could not allocate shared page\n"); 428 goto fail; 429 } 430 431 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 432 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 433 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 434 device_printf(dev, 435 "could not allocate TX ring %d, error %d\n", i, 436 error); 437 goto fail; 438 } 439 } 440 441 /* Allocate RX ring. */ 442 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 443 device_printf(dev, "could not allocate RX ring, error %d\n", 444 error); 445 goto fail; 446 } 447 448 /* Clear pending interrupts. */ 449 WPI_WRITE(sc, WPI_INT, 0xffffffff); 450 451 ic = &sc->sc_ic; 452 ic->ic_softc = sc; 453 ic->ic_name = device_get_nameunit(dev); 454 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 455 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 456 457 /* Set device capabilities. */ 458 ic->ic_caps = 459 IEEE80211_C_STA /* station mode supported */ 460 | IEEE80211_C_IBSS /* IBSS mode supported */ 461 | IEEE80211_C_HOSTAP /* Host access point mode */ 462 | IEEE80211_C_MONITOR /* monitor mode supported */ 463 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 464 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 465 | IEEE80211_C_TXFRAG /* handle tx frags */ 466 | IEEE80211_C_TXPMGT /* tx power management */ 467 | IEEE80211_C_SHSLOT /* short slot time supported */ 468 | IEEE80211_C_WPA /* 802.11i */ 469 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 470 | IEEE80211_C_WME /* 802.11e */ 471 | IEEE80211_C_PMGT /* Station-side power mgmt */ 472 ; 473 474 ic->ic_cryptocaps = 475 IEEE80211_CRYPTO_AES_CCM; 476 477 /* 478 * Read in the eeprom and also setup the channels for 479 * net80211. We don't set the rates as net80211 does this for us 480 */ 481 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 482 device_printf(dev, "could not read EEPROM, error %d\n", 483 error); 484 goto fail; 485 } 486 487 #ifdef WPI_DEBUG 488 if (bootverbose) { 489 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 490 sc->domain); 491 device_printf(sc->sc_dev, "Hardware Type: %c\n", 492 sc->type > 1 ? 'B': '?'); 493 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 494 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 495 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 496 supportsa ? "does" : "does not"); 497 498 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 499 check what sc->rev really represents - benjsc 20070615 */ 500 } 501 #endif 502 503 ieee80211_ifattach(ic); 504 ic->ic_vap_create = wpi_vap_create; 505 ic->ic_vap_delete = wpi_vap_delete; 506 ic->ic_parent = wpi_parent; 507 ic->ic_raw_xmit = wpi_raw_xmit; 508 ic->ic_transmit = wpi_transmit; 509 ic->ic_node_alloc = wpi_node_alloc; 510 sc->sc_node_free = ic->ic_node_free; 511 ic->ic_node_free = wpi_node_free; 512 ic->ic_wme.wme_update = wpi_updateedca; 513 ic->ic_update_promisc = wpi_update_promisc; 514 ic->ic_update_mcast = wpi_update_mcast; 515 ic->ic_newassoc = wpi_newassoc; 516 ic->ic_scan_start = wpi_scan_start; 517 ic->ic_scan_end = wpi_scan_end; 518 ic->ic_set_channel = wpi_set_channel; 519 ic->ic_scan_curchan = wpi_scan_curchan; 520 ic->ic_scan_mindwell = wpi_scan_mindwell; 521 ic->ic_getradiocaps = wpi_getradiocaps; 522 ic->ic_setregdomain = wpi_setregdomain; 523 524 sc->sc_update_rx_ring = wpi_update_rx_ring; 525 sc->sc_update_tx_ring = wpi_update_tx_ring; 526 527 wpi_radiotap_attach(sc); 528 529 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 530 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 531 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 532 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 533 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 534 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 535 536 wpi_sysctlattach(sc); 537 538 /* 539 * Hook our interrupt after all initialization is complete. 540 */ 541 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 542 NULL, wpi_intr, sc, &sc->sc_ih); 543 if (error != 0) { 544 device_printf(dev, "can't establish interrupt, error %d\n", 545 error); 546 goto fail; 547 } 548 549 if (bootverbose) 550 ieee80211_announce(ic); 551 552 #ifdef WPI_DEBUG 553 if (sc->sc_debug & WPI_DEBUG_HW) 554 ieee80211_announce_channels(ic); 555 #endif 556 557 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 558 return 0; 559 560 fail: wpi_detach(dev); 561 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 562 return error; 563 } 564 565 /* 566 * Attach the interface to 802.11 radiotap. 567 */ 568 static void 569 wpi_radiotap_attach(struct wpi_softc *sc) 570 { 571 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 572 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 573 574 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 575 ieee80211_radiotap_attach(&sc->sc_ic, 576 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 577 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 578 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 579 } 580 581 static void 582 wpi_sysctlattach(struct wpi_softc *sc) 583 { 584 #ifdef WPI_DEBUG 585 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 586 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 587 588 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 589 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 590 "control debugging printfs"); 591 #endif 592 } 593 594 static void 595 wpi_init_beacon(struct wpi_vap *wvp) 596 { 597 struct wpi_buf *bcn = &wvp->wv_bcbuf; 598 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 599 600 cmd->id = WPI_ID_BROADCAST; 601 cmd->ofdm_mask = 0xff; 602 cmd->cck_mask = 0x0f; 603 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 604 605 /* 606 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 607 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 
608 */ 609 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 610 611 bcn->code = WPI_CMD_SET_BEACON; 612 bcn->ac = WPI_CMD_QUEUE_NUM; 613 bcn->size = sizeof(struct wpi_cmd_beacon); 614 } 615 616 static struct ieee80211vap * 617 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 618 enum ieee80211_opmode opmode, int flags, 619 const uint8_t bssid[IEEE80211_ADDR_LEN], 620 const uint8_t mac[IEEE80211_ADDR_LEN]) 621 { 622 struct wpi_vap *wvp; 623 struct ieee80211vap *vap; 624 625 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 626 return NULL; 627 628 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 629 vap = &wvp->wv_vap; 630 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 631 632 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 633 WPI_VAP_LOCK_INIT(wvp); 634 wpi_init_beacon(wvp); 635 } 636 637 /* Override with driver methods. */ 638 vap->iv_key_set = wpi_key_set; 639 vap->iv_key_delete = wpi_key_delete; 640 if (opmode == IEEE80211_M_IBSS) { 641 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 642 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; 643 } 644 wvp->wv_newstate = vap->iv_newstate; 645 vap->iv_newstate = wpi_newstate; 646 vap->iv_update_beacon = wpi_update_beacon; 647 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 648 649 ieee80211_ratectl_init(vap); 650 /* Complete setup. */ 651 ieee80211_vap_attach(vap, ieee80211_media_change, 652 ieee80211_media_status, mac); 653 ic->ic_opmode = opmode; 654 return vap; 655 } 656 657 static void 658 wpi_vap_delete(struct ieee80211vap *vap) 659 { 660 struct wpi_vap *wvp = WPI_VAP(vap); 661 struct wpi_buf *bcn = &wvp->wv_bcbuf; 662 enum ieee80211_opmode opmode = vap->iv_opmode; 663 664 ieee80211_ratectl_deinit(vap); 665 ieee80211_vap_detach(vap); 666 667 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 668 if (bcn->m != NULL) 669 m_freem(bcn->m); 670 671 WPI_VAP_LOCK_DESTROY(wvp); 672 } 673 674 free(wvp, M_80211_VAP); 675 } 676 677 static int 678 wpi_detach(device_t dev) 679 { 680 struct wpi_softc *sc = device_get_softc(dev); 681 struct ieee80211com *ic = &sc->sc_ic; 682 uint8_t qid; 683 684 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 685 686 if (ic->ic_vap_create == wpi_vap_create) { 687 ieee80211_draintask(ic, &sc->sc_radioon_task); 688 ieee80211_draintask(ic, &sc->sc_radiooff_task); 689 690 wpi_stop(sc); 691 692 callout_drain(&sc->watchdog_rfkill); 693 callout_drain(&sc->tx_timeout); 694 callout_drain(&sc->scan_timeout); 695 callout_drain(&sc->calib_to); 696 ieee80211_ifdetach(ic); 697 } 698 699 /* Uninstall interrupt handler. */ 700 if (sc->irq != NULL) { 701 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 702 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 703 sc->irq); 704 pci_release_msi(dev); 705 } 706 707 if (sc->txq[0].data_dmat) { 708 /* Free DMA resources. 
*/ 709 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 710 wpi_free_tx_ring(sc, &sc->txq[qid]); 711 712 wpi_free_rx_ring(sc); 713 wpi_free_shared(sc); 714 } 715 716 if (sc->fw_dma.tag) 717 wpi_free_fwmem(sc); 718 719 if (sc->mem != NULL) 720 bus_release_resource(dev, SYS_RES_MEMORY, 721 rman_get_rid(sc->mem), sc->mem); 722 723 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 724 WPI_TXQ_STATE_LOCK_DESTROY(sc); 725 WPI_TXQ_LOCK_DESTROY(sc); 726 WPI_NT_LOCK_DESTROY(sc); 727 WPI_RXON_LOCK_DESTROY(sc); 728 WPI_TX_LOCK_DESTROY(sc); 729 WPI_LOCK_DESTROY(sc); 730 return 0; 731 } 732 733 static int 734 wpi_shutdown(device_t dev) 735 { 736 struct wpi_softc *sc = device_get_softc(dev); 737 738 wpi_stop(sc); 739 return 0; 740 } 741 742 static int 743 wpi_suspend(device_t dev) 744 { 745 struct wpi_softc *sc = device_get_softc(dev); 746 struct ieee80211com *ic = &sc->sc_ic; 747 748 ieee80211_suspend_all(ic); 749 return 0; 750 } 751 752 static int 753 wpi_resume(device_t dev) 754 { 755 struct wpi_softc *sc = device_get_softc(dev); 756 struct ieee80211com *ic = &sc->sc_ic; 757 758 /* Clear device-specific "PCI retry timeout" register (41h). */ 759 pci_write_config(dev, 0x41, 0, 1); 760 761 ieee80211_resume_all(ic); 762 return 0; 763 } 764 765 /* 766 * Grab exclusive access to NIC memory. 767 */ 768 static int 769 wpi_nic_lock(struct wpi_softc *sc) 770 { 771 int ntries; 772 773 /* Request exclusive access to NIC. */ 774 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 775 776 /* Spin until we actually get the lock. */ 777 for (ntries = 0; ntries < 1000; ntries++) { 778 if ((WPI_READ(sc, WPI_GP_CNTRL) & 779 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 780 WPI_GP_CNTRL_MAC_ACCESS_ENA) 781 return 0; 782 DELAY(10); 783 } 784 785 device_printf(sc->sc_dev, "could not lock memory\n"); 786 787 return ETIMEDOUT; 788 } 789 790 /* 791 * Release lock on NIC memory. 
792 */ 793 static __inline void 794 wpi_nic_unlock(struct wpi_softc *sc) 795 { 796 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 797 } 798 799 static __inline uint32_t 800 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 801 { 802 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 803 WPI_BARRIER_READ_WRITE(sc); 804 return WPI_READ(sc, WPI_PRPH_RDATA); 805 } 806 807 static __inline void 808 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 809 { 810 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 811 WPI_BARRIER_WRITE(sc); 812 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 813 } 814 815 static __inline void 816 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 817 { 818 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 819 } 820 821 static __inline void 822 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 823 { 824 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 825 } 826 827 static __inline void 828 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 829 const uint32_t *data, uint32_t count) 830 { 831 for (; count != 0; count--, data++, addr += 4) 832 wpi_prph_write(sc, addr, *data); 833 } 834 835 static __inline uint32_t 836 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 837 { 838 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 839 WPI_BARRIER_READ_WRITE(sc); 840 return WPI_READ(sc, WPI_MEM_RDATA); 841 } 842 843 static __inline void 844 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 845 int count) 846 { 847 for (; count > 0; count--, addr += 4) 848 *data++ = wpi_mem_read(sc, addr); 849 } 850 851 static int 852 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 853 { 854 uint8_t *out = data; 855 uint32_t val; 856 int error, ntries; 857 858 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 859 860 if ((error = wpi_nic_lock(sc)) != 0) 861 return error; 862 863 for (; count > 0; count -= 2, addr++) { 864 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 865 for (ntries = 0; ntries < 10; ntries++) { 866 val = WPI_READ(sc, WPI_EEPROM); 867 if (val & WPI_EEPROM_READ_VALID) 868 break; 869 DELAY(5); 870 } 871 if (ntries == 10) { 872 device_printf(sc->sc_dev, 873 "timeout reading ROM at 0x%x\n", addr); 874 return ETIMEDOUT; 875 } 876 *out++= val >> 16; 877 if (count > 1) 878 *out ++= val >> 24; 879 } 880 881 wpi_nic_unlock(sc); 882 883 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 884 885 return 0; 886 } 887 888 static void 889 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 890 { 891 if (error != 0) 892 return; 893 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 894 *(bus_addr_t *)arg = segs[0].ds_addr; 895 } 896 897 /* 898 * Allocates a contiguous block of dma memory of the requested size and 899 * alignment. 
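 *
 * For example, wpi_alloc_shared() below obtains the host/NIC shared page
 * with:
 *
 *	wpi_dma_contig_alloc(sc, &sc->shared_dma, (void **)&sc->shared,
 *	    sizeof (struct wpi_shared), 4096);
 *
 * On success the mapping's bus address is left in dma->paddr and, when
 * kvap is non-NULL, its kernel virtual address in *kvap.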
900 */ 901 static int 902 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 903 void **kvap, bus_size_t size, bus_size_t alignment) 904 { 905 int error; 906 907 dma->tag = NULL; 908 dma->size = size; 909 910 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 911 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 912 1, size, 0, NULL, NULL, &dma->tag); 913 if (error != 0) 914 goto fail; 915 916 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 917 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 918 if (error != 0) 919 goto fail; 920 921 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 922 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 923 if (error != 0) 924 goto fail; 925 926 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 927 928 if (kvap != NULL) 929 *kvap = dma->vaddr; 930 931 return 0; 932 933 fail: wpi_dma_contig_free(dma); 934 return error; 935 } 936 937 static void 938 wpi_dma_contig_free(struct wpi_dma_info *dma) 939 { 940 if (dma->vaddr != NULL) { 941 bus_dmamap_sync(dma->tag, dma->map, 942 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 943 bus_dmamap_unload(dma->tag, dma->map); 944 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 945 dma->vaddr = NULL; 946 } 947 if (dma->tag != NULL) { 948 bus_dma_tag_destroy(dma->tag); 949 dma->tag = NULL; 950 } 951 } 952 953 /* 954 * Allocate a shared page between host and NIC. 955 */ 956 static int 957 wpi_alloc_shared(struct wpi_softc *sc) 958 { 959 /* Shared buffer must be aligned on a 4KB boundary. */ 960 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 961 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 962 } 963 964 static void 965 wpi_free_shared(struct wpi_softc *sc) 966 { 967 wpi_dma_contig_free(&sc->shared_dma); 968 } 969 970 /* 971 * Allocate DMA-safe memory for firmware transfer. 972 */ 973 static int 974 wpi_alloc_fwmem(struct wpi_softc *sc) 975 { 976 /* Must be aligned on a 16-byte boundary. */ 977 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 978 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 979 } 980 981 static void 982 wpi_free_fwmem(struct wpi_softc *sc) 983 { 984 wpi_dma_contig_free(&sc->fw_dma); 985 } 986 987 static int 988 wpi_alloc_rx_ring(struct wpi_softc *sc) 989 { 990 struct wpi_rx_ring *ring = &sc->rxq; 991 bus_size_t size; 992 int i, error; 993 994 ring->cur = 0; 995 ring->update = 0; 996 997 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 998 999 /* Allocate RX descriptors (16KB aligned.) */ 1000 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1001 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1002 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1003 if (error != 0) { 1004 device_printf(sc->sc_dev, 1005 "%s: could not allocate RX ring DMA memory, error %d\n", 1006 __func__, error); 1007 goto fail; 1008 } 1009 1010 /* Create RX buffer DMA tag. */ 1011 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1012 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1013 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat); 1014 if (error != 0) { 1015 device_printf(sc->sc_dev, 1016 "%s: could not create RX buf DMA tag, error %d\n", 1017 __func__, error); 1018 goto fail; 1019 } 1020 1021 /* 1022 * Allocate and map RX buffers. 
1023 */ 1024 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1025 struct wpi_rx_data *data = &ring->data[i]; 1026 bus_addr_t paddr; 1027 1028 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1029 if (error != 0) { 1030 device_printf(sc->sc_dev, 1031 "%s: could not create RX buf DMA map, error %d\n", 1032 __func__, error); 1033 goto fail; 1034 } 1035 1036 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1037 if (data->m == NULL) { 1038 device_printf(sc->sc_dev, 1039 "%s: could not allocate RX mbuf\n", __func__); 1040 error = ENOBUFS; 1041 goto fail; 1042 } 1043 1044 error = bus_dmamap_load(ring->data_dmat, data->map, 1045 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1046 &paddr, BUS_DMA_NOWAIT); 1047 if (error != 0 && error != EFBIG) { 1048 device_printf(sc->sc_dev, 1049 "%s: can't map mbuf (error %d)\n", __func__, 1050 error); 1051 goto fail; 1052 } 1053 1054 /* Set physical address of RX buffer. */ 1055 ring->desc[i] = htole32(paddr); 1056 } 1057 1058 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1059 BUS_DMASYNC_PREWRITE); 1060 1061 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1062 1063 return 0; 1064 1065 fail: wpi_free_rx_ring(sc); 1066 1067 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1068 1069 return error; 1070 } 1071 1072 static void 1073 wpi_update_rx_ring(struct wpi_softc *sc) 1074 { 1075 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1076 } 1077 1078 static void 1079 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1080 { 1081 struct wpi_rx_ring *ring = &sc->rxq; 1082 1083 if (ring->update != 0) { 1084 /* Wait for INT_WAKEUP event. */ 1085 return; 1086 } 1087 1088 WPI_TXQ_LOCK(sc); 1089 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1090 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1091 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1092 __func__); 1093 ring->update = 1; 1094 } else { 1095 wpi_update_rx_ring(sc); 1096 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1097 } 1098 WPI_TXQ_UNLOCK(sc); 1099 } 1100 1101 static void 1102 wpi_reset_rx_ring(struct wpi_softc *sc) 1103 { 1104 struct wpi_rx_ring *ring = &sc->rxq; 1105 int ntries; 1106 1107 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1108 1109 if (wpi_nic_lock(sc) == 0) { 1110 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1111 for (ntries = 0; ntries < 1000; ntries++) { 1112 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1113 WPI_FH_RX_STATUS_IDLE) 1114 break; 1115 DELAY(10); 1116 } 1117 wpi_nic_unlock(sc); 1118 } 1119 1120 ring->cur = 0; 1121 ring->update = 0; 1122 } 1123 1124 static void 1125 wpi_free_rx_ring(struct wpi_softc *sc) 1126 { 1127 struct wpi_rx_ring *ring = &sc->rxq; 1128 int i; 1129 1130 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1131 1132 wpi_dma_contig_free(&ring->desc_dma); 1133 1134 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1135 struct wpi_rx_data *data = &ring->data[i]; 1136 1137 if (data->m != NULL) { 1138 bus_dmamap_sync(ring->data_dmat, data->map, 1139 BUS_DMASYNC_POSTREAD); 1140 bus_dmamap_unload(ring->data_dmat, data->map); 1141 m_freem(data->m); 1142 data->m = NULL; 1143 } 1144 if (data->map != NULL) 1145 bus_dmamap_destroy(ring->data_dmat, data->map); 1146 } 1147 if (ring->data_dmat != NULL) { 1148 bus_dma_tag_destroy(ring->data_dmat); 1149 ring->data_dmat = NULL; 1150 } 1151 } 1152 1153 static int 1154 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) 1155 { 1156 bus_addr_t paddr; 1157 bus_size_t size; 1158 int i, error; 1159 1160 ring->qid = qid; 1161 
ring->queued = 0; 1162 ring->cur = 0; 1163 ring->pending = 0; 1164 ring->update = 0; 1165 1166 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1167 1168 /* Allocate TX descriptors (16KB aligned.) */ 1169 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1170 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1171 size, WPI_RING_DMA_ALIGN); 1172 if (error != 0) { 1173 device_printf(sc->sc_dev, 1174 "%s: could not allocate TX ring DMA memory, error %d\n", 1175 __func__, error); 1176 goto fail; 1177 } 1178 1179 /* Update shared area with ring physical address. */ 1180 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1181 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1182 BUS_DMASYNC_PREWRITE); 1183 1184 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1185 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1186 size, 4); 1187 if (error != 0) { 1188 device_printf(sc->sc_dev, 1189 "%s: could not allocate TX cmd DMA memory, error %d\n", 1190 __func__, error); 1191 goto fail; 1192 } 1193 1194 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1195 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1196 WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); 1197 if (error != 0) { 1198 device_printf(sc->sc_dev, 1199 "%s: could not create TX buf DMA tag, error %d\n", 1200 __func__, error); 1201 goto fail; 1202 } 1203 1204 paddr = ring->cmd_dma.paddr; 1205 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1206 struct wpi_tx_data *data = &ring->data[i]; 1207 1208 data->cmd_paddr = paddr; 1209 paddr += sizeof (struct wpi_tx_cmd); 1210 1211 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1212 if (error != 0) { 1213 device_printf(sc->sc_dev, 1214 "%s: could not create TX buf DMA map, error %d\n", 1215 __func__, error); 1216 goto fail; 1217 } 1218 } 1219 1220 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1221 1222 return 0; 1223 1224 fail: wpi_free_tx_ring(sc, ring); 1225 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1226 return error; 1227 } 1228 1229 static void 1230 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1231 { 1232 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1233 } 1234 1235 static void 1236 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1237 { 1238 1239 if (ring->update != 0) { 1240 /* Wait for INT_WAKEUP event. */ 1241 return; 1242 } 1243 1244 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1245 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1246 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1247 __func__, ring->qid); 1248 ring->update = 1; 1249 } else { 1250 wpi_update_tx_ring(sc, ring); 1251 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1252 } 1253 } 1254 1255 static void 1256 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1257 { 1258 int i; 1259 1260 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1261 1262 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1263 struct wpi_tx_data *data = &ring->data[i]; 1264 1265 if (data->m != NULL) { 1266 bus_dmamap_sync(ring->data_dmat, data->map, 1267 BUS_DMASYNC_POSTWRITE); 1268 bus_dmamap_unload(ring->data_dmat, data->map); 1269 m_freem(data->m); 1270 data->m = NULL; 1271 } 1272 if (data->ni != NULL) { 1273 ieee80211_free_node(data->ni); 1274 data->ni = NULL; 1275 } 1276 } 1277 /* Clear TX descriptors. 
*/ 1278 memset(ring->desc, 0, ring->desc_dma.size); 1279 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1280 BUS_DMASYNC_PREWRITE); 1281 ring->queued = 0; 1282 ring->cur = 0; 1283 ring->pending = 0; 1284 ring->update = 0; 1285 } 1286 1287 static void 1288 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1289 { 1290 int i; 1291 1292 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1293 1294 wpi_dma_contig_free(&ring->desc_dma); 1295 wpi_dma_contig_free(&ring->cmd_dma); 1296 1297 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1298 struct wpi_tx_data *data = &ring->data[i]; 1299 1300 if (data->m != NULL) { 1301 bus_dmamap_sync(ring->data_dmat, data->map, 1302 BUS_DMASYNC_POSTWRITE); 1303 bus_dmamap_unload(ring->data_dmat, data->map); 1304 m_freem(data->m); 1305 } 1306 if (data->map != NULL) 1307 bus_dmamap_destroy(ring->data_dmat, data->map); 1308 } 1309 if (ring->data_dmat != NULL) { 1310 bus_dma_tag_destroy(ring->data_dmat); 1311 ring->data_dmat = NULL; 1312 } 1313 } 1314 1315 /* 1316 * Extract various information from EEPROM. 1317 */ 1318 static int 1319 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1320 { 1321 #define WPI_CHK(res) do { \ 1322 if ((error = res) != 0) \ 1323 goto fail; \ 1324 } while (0) 1325 uint8_t i; 1326 int error; 1327 1328 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1329 1330 /* Adapter has to be powered on for EEPROM access to work. */ 1331 if ((error = wpi_apm_init(sc)) != 0) { 1332 device_printf(sc->sc_dev, 1333 "%s: could not power ON adapter, error %d\n", __func__, 1334 error); 1335 return error; 1336 } 1337 1338 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1339 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1340 error = EIO; 1341 goto fail; 1342 } 1343 /* Clear HW ownership of EEPROM. */ 1344 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1345 1346 /* Read the hardware capabilities, revision and SKU type. */ 1347 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1348 sizeof(sc->cap))); 1349 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1350 sizeof(sc->rev))); 1351 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1352 sizeof(sc->type))); 1353 1354 sc->rev = le16toh(sc->rev); 1355 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1356 sc->rev, sc->type); 1357 1358 /* Read the regulatory domain (4 ASCII characters.) */ 1359 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1360 sizeof(sc->domain))); 1361 1362 /* Read MAC address. */ 1363 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1364 IEEE80211_ADDR_LEN)); 1365 1366 /* Read the list of authorized channels. */ 1367 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1368 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1369 1370 /* Read the list of TX power groups. */ 1371 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1372 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1373 1374 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1375 1376 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1377 __func__); 1378 1379 return error; 1380 #undef WPI_CHK 1381 } 1382 1383 /* 1384 * Translate EEPROM flags to net80211. 
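 * For instance, a channel without WPI_EEPROM_CHAN_ACTIVE is marked
 * IEEE80211_CHAN_PASSIVE, and WPI_EEPROM_CHAN_RADAR adds
 * IEEE80211_CHAN_DFS (plus IEEE80211_CHAN_NOADHOC).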
1385 */ 1386 static uint32_t 1387 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1388 { 1389 uint32_t nflags; 1390 1391 nflags = 0; 1392 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1393 nflags |= IEEE80211_CHAN_PASSIVE; 1394 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1395 nflags |= IEEE80211_CHAN_NOADHOC; 1396 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1397 nflags |= IEEE80211_CHAN_DFS; 1398 /* XXX apparently IBSS may still be marked */ 1399 nflags |= IEEE80211_CHAN_NOADHOC; 1400 } 1401 1402 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1403 if (nflags & IEEE80211_CHAN_NOADHOC) 1404 nflags |= IEEE80211_CHAN_NOHOSTAP; 1405 1406 return nflags; 1407 } 1408 1409 static void 1410 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n, int maxchans, 1411 int *nchans, struct ieee80211_channel chans[]) 1412 { 1413 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1414 const struct wpi_chan_band *band = &wpi_bands[n]; 1415 struct ieee80211_channel *c; 1416 uint32_t nflags; 1417 uint8_t chan, i; 1418 1419 for (i = 0; i < band->nchan; i++) { 1420 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1421 DPRINTF(sc, WPI_DEBUG_EEPROM, 1422 "Channel Not Valid: %d, band %d\n", 1423 band->chan[i],n); 1424 continue; 1425 } 1426 1427 if (*nchans >= maxchans) 1428 break; 1429 1430 chan = band->chan[i]; 1431 nflags = wpi_eeprom_channel_flags(&channels[i]); 1432 1433 c = &chans[(*nchans)++]; 1434 c->ic_ieee = chan; 1435 c->ic_maxregpower = channels[i].maxpwr; 1436 c->ic_maxpower = 2*c->ic_maxregpower; 1437 1438 if (n == 0) { /* 2GHz band */ 1439 c->ic_freq = ieee80211_ieee2mhz(chan, 1440 IEEE80211_CHAN_G); 1441 1442 /* G =>'s B is supported */ 1443 c->ic_flags = IEEE80211_CHAN_B | nflags; 1444 1445 if (*nchans >= maxchans) 1446 break; 1447 1448 c = &chans[(*nchans)++]; 1449 c[0] = c[-1]; 1450 c->ic_flags = IEEE80211_CHAN_G | nflags; 1451 } else { /* 5GHz band */ 1452 c->ic_freq = ieee80211_ieee2mhz(chan, 1453 IEEE80211_CHAN_A); 1454 1455 c->ic_flags = IEEE80211_CHAN_A | nflags; 1456 } 1457 1458 /* Save maximum allowed TX power for this channel. */ 1459 sc->maxpwr[chan] = channels[i].maxpwr; 1460 1461 DPRINTF(sc, WPI_DEBUG_EEPROM, 1462 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1463 " offset %d\n", chan, c->ic_freq, 1464 channels[i].flags, sc->maxpwr[chan], 1465 IEEE80211_IS_CHAN_PASSIVE(c), *nchans); 1466 } 1467 } 1468 1469 /** 1470 * Read the eeprom to find out what channels are valid for the given 1471 * band and update net80211 with what we find. 
1472 */ 1473 static int 1474 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) 1475 { 1476 struct ieee80211com *ic = &sc->sc_ic; 1477 const struct wpi_chan_band *band = &wpi_bands[n]; 1478 int error; 1479 1480 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1481 1482 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1483 band->nchan * sizeof (struct wpi_eeprom_chan)); 1484 if (error != 0) { 1485 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1486 return error; 1487 } 1488 1489 wpi_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, 1490 ic->ic_channels); 1491 1492 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1493 1494 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1495 1496 return 0; 1497 } 1498 1499 static struct wpi_eeprom_chan * 1500 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1501 { 1502 int i, j; 1503 1504 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1505 for (i = 0; i < wpi_bands[j].nchan; i++) 1506 if (wpi_bands[j].chan[i] == c->ic_ieee) 1507 return &sc->eeprom_channels[j][i]; 1508 1509 return NULL; 1510 } 1511 1512 static void 1513 wpi_getradiocaps(struct ieee80211com *ic, 1514 int maxchans, int *nchans, struct ieee80211_channel chans[]) 1515 { 1516 struct wpi_softc *sc = ic->ic_softc; 1517 int i; 1518 1519 /* Parse the list of authorized channels. */ 1520 for (i = 0; i < WPI_CHAN_BANDS_COUNT && *nchans < maxchans; i++) 1521 wpi_read_eeprom_band(sc, i, maxchans, nchans, chans); 1522 } 1523 1524 /* 1525 * Enforce flags read from EEPROM. 1526 */ 1527 static int 1528 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1529 int nchan, struct ieee80211_channel chans[]) 1530 { 1531 struct wpi_softc *sc = ic->ic_softc; 1532 int i; 1533 1534 for (i = 0; i < nchan; i++) { 1535 struct ieee80211_channel *c = &chans[i]; 1536 struct wpi_eeprom_chan *channel; 1537 1538 channel = wpi_find_eeprom_channel(sc, c); 1539 if (channel == NULL) { 1540 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1541 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1542 return EINVAL; 1543 } 1544 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1545 } 1546 1547 return 0; 1548 } 1549 1550 static int 1551 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) 1552 { 1553 struct wpi_power_group *group = &sc->groups[n]; 1554 struct wpi_eeprom_group rgroup; 1555 int i, error; 1556 1557 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1558 1559 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1560 &rgroup, sizeof rgroup)) != 0) { 1561 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1562 return error; 1563 } 1564 1565 /* Save TX power group information. */ 1566 group->chan = rgroup.chan; 1567 group->maxpwr = rgroup.maxpwr; 1568 /* Retrieve temperature at which the samples were taken. 
*/ 1569 group->temp = (int16_t)le16toh(rgroup.temp); 1570 1571 DPRINTF(sc, WPI_DEBUG_EEPROM, 1572 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1573 group->maxpwr, group->temp); 1574 1575 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1576 group->samples[i].index = rgroup.samples[i].index; 1577 group->samples[i].power = rgroup.samples[i].power; 1578 1579 DPRINTF(sc, WPI_DEBUG_EEPROM, 1580 "\tsample %d: index=%d power=%d\n", i, 1581 group->samples[i].index, group->samples[i].power); 1582 } 1583 1584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1585 1586 return 0; 1587 } 1588 1589 static __inline uint8_t 1590 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1591 { 1592 uint8_t newid = WPI_ID_IBSS_MIN; 1593 1594 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1595 if ((sc->nodesmsk & (1 << newid)) == 0) { 1596 sc->nodesmsk |= 1 << newid; 1597 return newid; 1598 } 1599 } 1600 1601 return WPI_ID_UNDEFINED; 1602 } 1603 1604 static __inline uint8_t 1605 wpi_add_node_entry_sta(struct wpi_softc *sc) 1606 { 1607 sc->nodesmsk |= 1 << WPI_ID_BSS; 1608 1609 return WPI_ID_BSS; 1610 } 1611 1612 static __inline int 1613 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1614 { 1615 if (id == WPI_ID_UNDEFINED) 1616 return 0; 1617 1618 return (sc->nodesmsk >> id) & 1; 1619 } 1620 1621 static __inline void 1622 wpi_clear_node_table(struct wpi_softc *sc) 1623 { 1624 sc->nodesmsk = 0; 1625 } 1626 1627 static __inline void 1628 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1629 { 1630 sc->nodesmsk &= ~(1 << id); 1631 } 1632 1633 static struct ieee80211_node * 1634 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1635 { 1636 struct wpi_node *wn; 1637 1638 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1639 M_NOWAIT | M_ZERO); 1640 1641 if (wn == NULL) 1642 return NULL; 1643 1644 wn->id = WPI_ID_UNDEFINED; 1645 1646 return &wn->ni; 1647 } 1648 1649 static void 1650 wpi_node_free(struct ieee80211_node *ni) 1651 { 1652 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1653 struct wpi_node *wn = WPI_NODE(ni); 1654 1655 if (wn->id != WPI_ID_UNDEFINED) { 1656 WPI_NT_LOCK(sc); 1657 if (wpi_check_node_entry(sc, wn->id)) { 1658 wpi_del_node_entry(sc, wn->id); 1659 wpi_del_node(sc, ni); 1660 } 1661 WPI_NT_UNLOCK(sc); 1662 } 1663 1664 sc->sc_node_free(ni); 1665 } 1666 1667 static __inline int 1668 wpi_check_bss_filter(struct wpi_softc *sc) 1669 { 1670 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1671 } 1672 1673 static void 1674 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1675 const struct ieee80211_rx_stats *rxs, 1676 int rssi, int nf) 1677 { 1678 struct ieee80211vap *vap = ni->ni_vap; 1679 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1680 struct wpi_vap *wvp = WPI_VAP(vap); 1681 uint64_t ni_tstamp, rx_tstamp; 1682 1683 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1684 1685 if (vap->iv_state == IEEE80211_S_RUN && 1686 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1687 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1688 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1689 rx_tstamp = le64toh(sc->rx_tstamp); 1690 1691 if (ni_tstamp >= rx_tstamp) { 1692 DPRINTF(sc, WPI_DEBUG_STATE, 1693 "ibss merge, tsf %ju tstamp %ju\n", 1694 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1695 (void) ieee80211_ibss_merge(ni); 1696 } 1697 } 1698 } 1699 1700 static void 1701 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1702 { 1703 struct wpi_softc *sc = arg; 1704 struct wpi_node *wn = WPI_NODE(ni); 1705 int error; 1706 1707 WPI_NT_LOCK(sc); 
1708 if (wn->id != WPI_ID_UNDEFINED) { 1709 wn->id = WPI_ID_UNDEFINED; 1710 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1711 device_printf(sc->sc_dev, 1712 "%s: could not add IBSS node, error %d\n", 1713 __func__, error); 1714 } 1715 } 1716 WPI_NT_UNLOCK(sc); 1717 } 1718 1719 static void 1720 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1721 { 1722 struct ieee80211com *ic = &sc->sc_ic; 1723 1724 /* Set group keys once. */ 1725 WPI_NT_LOCK(sc); 1726 wvp->wv_gtk = 0; 1727 WPI_NT_UNLOCK(sc); 1728 1729 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1730 ieee80211_crypto_reload_keys(ic); 1731 } 1732 1733 /** 1734 * Called by net80211 when ever there is a change to 80211 state machine 1735 */ 1736 static int 1737 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1738 { 1739 struct wpi_vap *wvp = WPI_VAP(vap); 1740 struct ieee80211com *ic = vap->iv_ic; 1741 struct wpi_softc *sc = ic->ic_softc; 1742 int error = 0; 1743 1744 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1745 1746 WPI_TXQ_LOCK(sc); 1747 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1748 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1749 WPI_TXQ_UNLOCK(sc); 1750 1751 return ENXIO; 1752 } 1753 WPI_TXQ_UNLOCK(sc); 1754 1755 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1756 ieee80211_state_name[vap->iv_state], 1757 ieee80211_state_name[nstate]); 1758 1759 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1760 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1761 device_printf(sc->sc_dev, 1762 "%s: could not set power saving level\n", 1763 __func__); 1764 return error; 1765 } 1766 1767 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1768 } 1769 1770 switch (nstate) { 1771 case IEEE80211_S_SCAN: 1772 WPI_RXON_LOCK(sc); 1773 if (wpi_check_bss_filter(sc) != 0) { 1774 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1775 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1776 device_printf(sc->sc_dev, 1777 "%s: could not send RXON\n", __func__); 1778 } 1779 } 1780 WPI_RXON_UNLOCK(sc); 1781 break; 1782 1783 case IEEE80211_S_ASSOC: 1784 if (vap->iv_state != IEEE80211_S_RUN) 1785 break; 1786 /* FALLTHROUGH */ 1787 case IEEE80211_S_AUTH: 1788 /* 1789 * NB: do not optimize AUTH -> AUTH state transmission - 1790 * this will break powersave with non-QoS AP! 1791 */ 1792 1793 /* 1794 * The node must be registered in the firmware before auth. 1795 * Also the associd must be cleared on RUN -> ASSOC 1796 * transitions. 1797 */ 1798 if ((error = wpi_auth(sc, vap)) != 0) { 1799 device_printf(sc->sc_dev, 1800 "%s: could not move to AUTH state, error %d\n", 1801 __func__, error); 1802 } 1803 break; 1804 1805 case IEEE80211_S_RUN: 1806 /* 1807 * RUN -> RUN transition: 1808 * STA mode: Just restart the timers. 1809 * IBSS mode: Process IBSS merge. 1810 */ 1811 if (vap->iv_state == IEEE80211_S_RUN) { 1812 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1813 WPI_RXON_LOCK(sc); 1814 wpi_calib_timeout(sc); 1815 WPI_RXON_UNLOCK(sc); 1816 break; 1817 } else { 1818 /* 1819 * Drop the BSS_FILTER bit 1820 * (there is no another way to change bssid). 1821 */ 1822 WPI_RXON_LOCK(sc); 1823 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1824 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1825 device_printf(sc->sc_dev, 1826 "%s: could not send RXON\n", 1827 __func__); 1828 } 1829 WPI_RXON_UNLOCK(sc); 1830 1831 /* Restore all what was lost. */ 1832 wpi_restore_node_table(sc, wvp); 1833 1834 /* XXX set conditionally? 
*/ 1835 wpi_updateedca(ic); 1836 } 1837 } 1838 1839 /* 1840 * !RUN -> RUN requires setting the association id 1841 * which is done with a firmware cmd. We also defer 1842 * starting the timers until that work is done. 1843 */ 1844 if ((error = wpi_run(sc, vap)) != 0) { 1845 device_printf(sc->sc_dev, 1846 "%s: could not move to RUN state\n", __func__); 1847 } 1848 break; 1849 1850 default: 1851 break; 1852 } 1853 if (error != 0) { 1854 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1855 return error; 1856 } 1857 1858 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1859 1860 return wvp->wv_newstate(vap, nstate, arg); 1861 } 1862 1863 static void 1864 wpi_calib_timeout(void *arg) 1865 { 1866 struct wpi_softc *sc = arg; 1867 1868 if (wpi_check_bss_filter(sc) == 0) 1869 return; 1870 1871 wpi_power_calibration(sc); 1872 1873 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1874 } 1875 1876 static __inline uint8_t 1877 rate2plcp(const uint8_t rate) 1878 { 1879 switch (rate) { 1880 case 12: return 0xd; 1881 case 18: return 0xf; 1882 case 24: return 0x5; 1883 case 36: return 0x7; 1884 case 48: return 0x9; 1885 case 72: return 0xb; 1886 case 96: return 0x1; 1887 case 108: return 0x3; 1888 case 2: return 10; 1889 case 4: return 20; 1890 case 11: return 55; 1891 case 22: return 110; 1892 default: return 0; 1893 } 1894 } 1895 1896 static __inline uint8_t 1897 plcp2rate(const uint8_t plcp) 1898 { 1899 switch (plcp) { 1900 case 0xd: return 12; 1901 case 0xf: return 18; 1902 case 0x5: return 24; 1903 case 0x7: return 36; 1904 case 0x9: return 48; 1905 case 0xb: return 72; 1906 case 0x1: return 96; 1907 case 0x3: return 108; 1908 case 10: return 2; 1909 case 20: return 4; 1910 case 55: return 11; 1911 case 110: return 22; 1912 default: return 0; 1913 } 1914 } 1915 1916 /* Quickly determine if a given rate is CCK or OFDM. */ 1917 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1918 1919 static void 1920 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1921 struct wpi_rx_data *data) 1922 { 1923 struct ieee80211com *ic = &sc->sc_ic; 1924 struct wpi_rx_ring *ring = &sc->rxq; 1925 struct wpi_rx_stat *stat; 1926 struct wpi_rx_head *head; 1927 struct wpi_rx_tail *tail; 1928 struct ieee80211_frame *wh; 1929 struct ieee80211_node *ni; 1930 struct mbuf *m, *m1; 1931 bus_addr_t paddr; 1932 uint32_t flags; 1933 uint16_t len; 1934 int error; 1935 1936 stat = (struct wpi_rx_stat *)(desc + 1); 1937 1938 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1939 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1940 goto fail1; 1941 } 1942 1943 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1944 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1945 len = le16toh(head->len); 1946 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1947 flags = le32toh(tail->flags); 1948 1949 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1950 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1951 le32toh(desc->len), len, (int8_t)stat->rssi, 1952 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1953 1954 /* Discard frames with a bad FCS early. */ 1955 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1956 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1957 __func__, flags); 1958 goto fail1; 1959 } 1960 /* Discard frames that are too short. 
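	 * Anything smaller than sizeof(struct ieee80211_frame_ack)
	 * (a 10-byte ACK header) cannot be a valid 802.11 frame.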
*/ 1961 if (len < sizeof (struct ieee80211_frame_ack)) { 1962 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1963 __func__, len); 1964 goto fail1; 1965 } 1966 1967 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1968 if (__predict_false(m1 == NULL)) { 1969 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1970 __func__); 1971 goto fail1; 1972 } 1973 bus_dmamap_unload(ring->data_dmat, data->map); 1974 1975 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1976 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1977 if (__predict_false(error != 0 && error != EFBIG)) { 1978 device_printf(sc->sc_dev, 1979 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1980 m_freem(m1); 1981 1982 /* Try to reload the old mbuf. */ 1983 error = bus_dmamap_load(ring->data_dmat, data->map, 1984 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1985 &paddr, BUS_DMA_NOWAIT); 1986 if (error != 0 && error != EFBIG) { 1987 panic("%s: could not load old RX mbuf", __func__); 1988 } 1989 /* Physical address may have changed. */ 1990 ring->desc[ring->cur] = htole32(paddr); 1991 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1992 BUS_DMASYNC_PREWRITE); 1993 goto fail1; 1994 } 1995 1996 m = data->m; 1997 data->m = m1; 1998 /* Update RX descriptor. */ 1999 ring->desc[ring->cur] = htole32(paddr); 2000 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2001 BUS_DMASYNC_PREWRITE); 2002 2003 /* Finalize mbuf. */ 2004 m->m_data = (caddr_t)(head + 1); 2005 m->m_pkthdr.len = m->m_len = len; 2006 2007 /* Grab a reference to the source node. */ 2008 wh = mtod(m, struct ieee80211_frame *); 2009 2010 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2011 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2012 /* Check whether decryption was successful or not. */ 2013 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2014 DPRINTF(sc, WPI_DEBUG_RECV, 2015 "CCMP decryption failed 0x%x\n", flags); 2016 goto fail2; 2017 } 2018 m->m_flags |= M_WEP; 2019 } 2020 2021 if (len >= sizeof(struct ieee80211_frame_min)) 2022 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2023 else 2024 ni = NULL; 2025 2026 sc->rx_tstamp = tail->tstamp; 2027 2028 if (ieee80211_radiotap_active(ic)) { 2029 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2030 2031 tap->wr_flags = 0; 2032 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2033 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2034 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2035 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2036 tap->wr_tsft = tail->tstamp; 2037 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2038 tap->wr_rate = plcp2rate(head->plcp); 2039 } 2040 2041 WPI_UNLOCK(sc); 2042 2043 /* Send the frame to the 802.11 layer. */ 2044 if (ni != NULL) { 2045 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2046 /* Node is no longer needed. 
*/ 2047 ieee80211_free_node(ni); 2048 } else 2049 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2050 2051 WPI_LOCK(sc); 2052 2053 return; 2054 2055 fail2: m_freem(m); 2056 2057 fail1: counter_u64_add(ic->ic_ierrors, 1); 2058 } 2059 2060 static void 2061 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2062 struct wpi_rx_data *data) 2063 { 2064 /* Ignore */ 2065 } 2066 2067 static void 2068 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2069 { 2070 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2071 struct wpi_tx_data *data = &ring->data[desc->idx]; 2072 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2073 struct mbuf *m; 2074 struct ieee80211_node *ni; 2075 struct ieee80211vap *vap; 2076 struct ieee80211com *ic; 2077 uint32_t status = le32toh(stat->status); 2078 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2079 2080 KASSERT(data->ni != NULL, ("no node")); 2081 KASSERT(data->m != NULL, ("no mbuf")); 2082 2083 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2084 2085 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2086 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2087 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2088 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2089 2090 /* Unmap and free mbuf. */ 2091 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2092 bus_dmamap_unload(ring->data_dmat, data->map); 2093 m = data->m, data->m = NULL; 2094 ni = data->ni, data->ni = NULL; 2095 vap = ni->ni_vap; 2096 ic = vap->iv_ic; 2097 2098 /* 2099 * Update rate control statistics for the node. 2100 */ 2101 if (status & WPI_TX_STATUS_FAIL) { 2102 ieee80211_ratectl_tx_complete(vap, ni, 2103 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2104 } else 2105 ieee80211_ratectl_tx_complete(vap, ni, 2106 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2107 2108 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2109 2110 WPI_TXQ_STATE_LOCK(sc); 2111 if (--ring->queued > 0) 2112 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2113 else 2114 callout_stop(&sc->tx_timeout); 2115 WPI_TXQ_STATE_UNLOCK(sc); 2116 2117 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2118 } 2119 2120 /* 2121 * Process a "command done" firmware notification. This is where we wakeup 2122 * processes waiting for a synchronous command completion. 2123 */ 2124 static void 2125 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2126 { 2127 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2128 struct wpi_tx_data *data; 2129 struct wpi_tx_cmd *cmd; 2130 2131 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2132 "type %s len %d\n", desc->qid, desc->idx, 2133 desc->flags, wpi_cmd_str(desc->type), 2134 le32toh(desc->len)); 2135 2136 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2137 return; /* Not a command ack. */ 2138 2139 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2140 2141 data = &ring->data[desc->idx]; 2142 cmd = &ring->cmd[desc->idx]; 2143 2144 /* If the command was mapped in an mbuf, free it. 
*/ 2145 if (data->m != NULL) { 2146 bus_dmamap_sync(ring->data_dmat, data->map, 2147 BUS_DMASYNC_POSTWRITE); 2148 bus_dmamap_unload(ring->data_dmat, data->map); 2149 m_freem(data->m); 2150 data->m = NULL; 2151 } 2152 2153 wakeup(cmd); 2154 2155 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2156 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2157 2158 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2159 BUS_DMASYNC_POSTREAD); 2160 2161 WPI_TXQ_LOCK(sc); 2162 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2163 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2164 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2165 } else { 2166 sc->sc_update_rx_ring = wpi_update_rx_ring; 2167 sc->sc_update_tx_ring = wpi_update_tx_ring; 2168 } 2169 WPI_TXQ_UNLOCK(sc); 2170 } 2171 } 2172 2173 static void 2174 wpi_notif_intr(struct wpi_softc *sc) 2175 { 2176 struct ieee80211com *ic = &sc->sc_ic; 2177 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2178 uint32_t hw; 2179 2180 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2181 BUS_DMASYNC_POSTREAD); 2182 2183 hw = le32toh(sc->shared->next) & 0xfff; 2184 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2185 2186 while (sc->rxq.cur != hw) { 2187 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2188 2189 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2190 struct wpi_rx_desc *desc; 2191 2192 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2193 BUS_DMASYNC_POSTREAD); 2194 desc = mtod(data->m, struct wpi_rx_desc *); 2195 2196 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2197 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2198 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2199 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2200 2201 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2202 /* Reply to a command. */ 2203 wpi_cmd_done(sc, desc); 2204 } 2205 2206 switch (desc->type) { 2207 case WPI_RX_DONE: 2208 /* An 802.11 frame has been received. */ 2209 wpi_rx_done(sc, desc, data); 2210 2211 if (__predict_false(sc->sc_running == 0)) { 2212 /* wpi_stop() was called. */ 2213 return; 2214 } 2215 2216 break; 2217 2218 case WPI_TX_DONE: 2219 /* An 802.11 frame has been transmitted. 
*/ 2220 wpi_tx_done(sc, desc); 2221 break; 2222 2223 case WPI_RX_STATISTICS: 2224 case WPI_BEACON_STATISTICS: 2225 wpi_rx_statistics(sc, desc, data); 2226 break; 2227 2228 case WPI_BEACON_MISSED: 2229 { 2230 struct wpi_beacon_missed *miss = 2231 (struct wpi_beacon_missed *)(desc + 1); 2232 uint32_t expected, misses, received, threshold; 2233 2234 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2235 BUS_DMASYNC_POSTREAD); 2236 2237 misses = le32toh(miss->consecutive); 2238 expected = le32toh(miss->expected); 2239 received = le32toh(miss->received); 2240 threshold = MAX(2, vap->iv_bmissthreshold); 2241 2242 DPRINTF(sc, WPI_DEBUG_BMISS, 2243 "%s: beacons missed %u(%u) (received %u/%u)\n", 2244 __func__, misses, le32toh(miss->total), received, 2245 expected); 2246 2247 if (misses >= threshold || 2248 (received == 0 && expected >= threshold)) { 2249 WPI_RXON_LOCK(sc); 2250 if (callout_pending(&sc->scan_timeout)) { 2251 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2252 0, 1); 2253 } 2254 WPI_RXON_UNLOCK(sc); 2255 if (vap->iv_state == IEEE80211_S_RUN && 2256 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2257 ieee80211_beacon_miss(ic); 2258 } 2259 2260 break; 2261 } 2262 #ifdef WPI_DEBUG 2263 case WPI_BEACON_SENT: 2264 { 2265 struct wpi_tx_stat *stat = 2266 (struct wpi_tx_stat *)(desc + 1); 2267 uint64_t *tsf = (uint64_t *)(stat + 1); 2268 uint32_t *mode = (uint32_t *)(tsf + 1); 2269 2270 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2271 BUS_DMASYNC_POSTREAD); 2272 2273 DPRINTF(sc, WPI_DEBUG_BEACON, 2274 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2275 "duration %u, status %x, tsf %ju, mode %x\n", 2276 stat->rtsfailcnt, stat->ackfailcnt, 2277 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2278 le32toh(stat->status), le64toh(*tsf), 2279 le32toh(*mode)); 2280 2281 break; 2282 } 2283 #endif 2284 case WPI_UC_READY: 2285 { 2286 struct wpi_ucode_info *uc = 2287 (struct wpi_ucode_info *)(desc + 1); 2288 2289 /* The microcontroller is ready. */ 2290 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2291 BUS_DMASYNC_POSTREAD); 2292 DPRINTF(sc, WPI_DEBUG_RESET, 2293 "microcode alive notification version=%d.%d " 2294 "subtype=%x alive=%x\n", uc->major, uc->minor, 2295 uc->subtype, le32toh(uc->valid)); 2296 2297 if (le32toh(uc->valid) != 1) { 2298 device_printf(sc->sc_dev, 2299 "microcontroller initialization failed\n"); 2300 wpi_stop_locked(sc); 2301 return; 2302 } 2303 /* Save the address of the error log in SRAM. 
*/ 2304 sc->errptr = le32toh(uc->errptr); 2305 break; 2306 } 2307 case WPI_STATE_CHANGED: 2308 { 2309 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2310 BUS_DMASYNC_POSTREAD); 2311 2312 uint32_t *status = (uint32_t *)(desc + 1); 2313 2314 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2315 le32toh(*status)); 2316 2317 if (le32toh(*status) & 1) { 2318 WPI_NT_LOCK(sc); 2319 wpi_clear_node_table(sc); 2320 WPI_NT_UNLOCK(sc); 2321 ieee80211_runtask(ic, 2322 &sc->sc_radiooff_task); 2323 return; 2324 } 2325 break; 2326 } 2327 #ifdef WPI_DEBUG 2328 case WPI_START_SCAN: 2329 { 2330 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2331 BUS_DMASYNC_POSTREAD); 2332 2333 struct wpi_start_scan *scan = 2334 (struct wpi_start_scan *)(desc + 1); 2335 DPRINTF(sc, WPI_DEBUG_SCAN, 2336 "%s: scanning channel %d status %x\n", 2337 __func__, scan->chan, le32toh(scan->status)); 2338 2339 break; 2340 } 2341 #endif 2342 case WPI_STOP_SCAN: 2343 { 2344 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2345 BUS_DMASYNC_POSTREAD); 2346 2347 struct wpi_stop_scan *scan = 2348 (struct wpi_stop_scan *)(desc + 1); 2349 2350 DPRINTF(sc, WPI_DEBUG_SCAN, 2351 "scan finished nchan=%d status=%d chan=%d\n", 2352 scan->nchan, scan->status, scan->chan); 2353 2354 WPI_RXON_LOCK(sc); 2355 callout_stop(&sc->scan_timeout); 2356 WPI_RXON_UNLOCK(sc); 2357 if (scan->status == WPI_SCAN_ABORTED) 2358 ieee80211_cancel_scan(vap); 2359 else 2360 ieee80211_scan_next(vap); 2361 break; 2362 } 2363 } 2364 2365 if (sc->rxq.cur % 8 == 0) { 2366 /* Tell the firmware what we have processed. */ 2367 sc->sc_update_rx_ring(sc); 2368 } 2369 } 2370 } 2371 2372 /* 2373 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2374 * from power-down sleep mode. 2375 */ 2376 static void 2377 wpi_wakeup_intr(struct wpi_softc *sc) 2378 { 2379 int qid; 2380 2381 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2382 "%s: ucode wakeup from power-down sleep\n", __func__); 2383 2384 /* Wakeup RX and TX rings. 
*/ 2385 if (sc->rxq.update) { 2386 sc->rxq.update = 0; 2387 wpi_update_rx_ring(sc); 2388 } 2389 WPI_TXQ_LOCK(sc); 2390 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2391 struct wpi_tx_ring *ring = &sc->txq[qid]; 2392 2393 if (ring->update) { 2394 ring->update = 0; 2395 wpi_update_tx_ring(sc, ring); 2396 } 2397 } 2398 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2399 WPI_TXQ_UNLOCK(sc); 2400 } 2401 2402 /* 2403 * This function prints firmware registers 2404 */ 2405 #ifdef WPI_DEBUG 2406 static void 2407 wpi_debug_registers(struct wpi_softc *sc) 2408 { 2409 size_t i; 2410 static const uint32_t csr_tbl[] = { 2411 WPI_HW_IF_CONFIG, 2412 WPI_INT, 2413 WPI_INT_MASK, 2414 WPI_FH_INT, 2415 WPI_GPIO_IN, 2416 WPI_RESET, 2417 WPI_GP_CNTRL, 2418 WPI_EEPROM, 2419 WPI_EEPROM_GP, 2420 WPI_GIO, 2421 WPI_UCODE_GP1, 2422 WPI_UCODE_GP2, 2423 WPI_GIO_CHICKEN, 2424 WPI_ANA_PLL, 2425 WPI_DBG_HPET_MEM, 2426 }; 2427 static const uint32_t prph_tbl[] = { 2428 WPI_APMG_CLK_CTRL, 2429 WPI_APMG_PS, 2430 WPI_APMG_PCI_STT, 2431 WPI_APMG_RFKILL, 2432 }; 2433 2434 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2435 2436 for (i = 0; i < nitems(csr_tbl); i++) { 2437 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2438 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2439 2440 if ((i + 1) % 2 == 0) 2441 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2442 } 2443 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2444 2445 if (wpi_nic_lock(sc) == 0) { 2446 for (i = 0; i < nitems(prph_tbl); i++) { 2447 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2448 wpi_get_prph_string(prph_tbl[i]), 2449 wpi_prph_read(sc, prph_tbl[i])); 2450 2451 if ((i + 1) % 2 == 0) 2452 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2453 } 2454 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2455 wpi_nic_unlock(sc); 2456 } else { 2457 DPRINTF(sc, WPI_DEBUG_REGISTER, 2458 "Cannot access internal registers.\n"); 2459 } 2460 } 2461 #endif 2462 2463 /* 2464 * Dump the error log of the firmware when a firmware panic occurs. Although 2465 * we can't debug the firmware because it is neither open source nor free, it 2466 * can help us to identify certain classes of problems. 2467 */ 2468 static void 2469 wpi_fatal_intr(struct wpi_softc *sc) 2470 { 2471 struct wpi_fw_dump dump; 2472 uint32_t i, offset, count; 2473 2474 /* Check that the error log address is valid. */ 2475 if (sc->errptr < WPI_FW_DATA_BASE || 2476 sc->errptr + sizeof (dump) > 2477 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2478 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2479 sc->errptr); 2480 return; 2481 } 2482 if (wpi_nic_lock(sc) != 0) { 2483 printf("%s: could not read firmware error log\n", __func__); 2484 return; 2485 } 2486 /* Read number of entries in the log. */ 2487 count = wpi_mem_read(sc, sc->errptr); 2488 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2489 printf("%s: invalid count field (count = %u)\n", __func__, 2490 count); 2491 wpi_nic_unlock(sc); 2492 return; 2493 } 2494 /* Skip "count" field. */ 2495 offset = sc->errptr + sizeof (uint32_t); 2496 printf("firmware error log (count = %u):\n", count); 2497 for (i = 0; i < count; i++) { 2498 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2499 sizeof (dump) / sizeof (uint32_t)); 2500 2501 printf(" error type = \"%s\" (0x%08X)\n", 2502 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2503 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2504 dump.desc); 2505 printf(" error data = 0x%08X\n", 2506 dump.data); 2507 printf(" branch link = 0x%08X%08X\n", 2508 dump.blink[0], dump.blink[1]); 2509 printf(" interrupt link = 0x%08X%08X\n", 2510 dump.ilink[0], dump.ilink[1]); 2511 printf(" time = %u\n", dump.time); 2512 2513 offset += sizeof (dump); 2514 } 2515 wpi_nic_unlock(sc); 2516 /* Dump driver status (TX and RX rings) while we're here. */ 2517 printf("driver status:\n"); 2518 WPI_TXQ_LOCK(sc); 2519 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2520 struct wpi_tx_ring *ring = &sc->txq[i]; 2521 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2522 i, ring->qid, ring->cur, ring->queued); 2523 } 2524 WPI_TXQ_UNLOCK(sc); 2525 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2526 } 2527 2528 static void 2529 wpi_intr(void *arg) 2530 { 2531 struct wpi_softc *sc = arg; 2532 uint32_t r1, r2; 2533 2534 WPI_LOCK(sc); 2535 2536 /* Disable interrupts. */ 2537 WPI_WRITE(sc, WPI_INT_MASK, 0); 2538 2539 r1 = WPI_READ(sc, WPI_INT); 2540 2541 if (__predict_false(r1 == 0xffffffff || 2542 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2543 goto end; /* Hardware gone! */ 2544 2545 r2 = WPI_READ(sc, WPI_FH_INT); 2546 2547 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2548 r1, r2); 2549 2550 if (r1 == 0 && r2 == 0) 2551 goto done; /* Interrupt not for us. */ 2552 2553 /* Acknowledge interrupts. */ 2554 WPI_WRITE(sc, WPI_INT, r1); 2555 WPI_WRITE(sc, WPI_FH_INT, r2); 2556 2557 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2558 struct ieee80211com *ic = &sc->sc_ic; 2559 2560 device_printf(sc->sc_dev, "fatal firmware error\n"); 2561 #ifdef WPI_DEBUG 2562 wpi_debug_registers(sc); 2563 #endif 2564 wpi_fatal_intr(sc); 2565 DPRINTF(sc, WPI_DEBUG_HW, 2566 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2567 "(Hardware Error)"); 2568 ieee80211_restart_all(ic); 2569 goto end; 2570 } 2571 2572 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2573 (r2 & WPI_FH_INT_RX)) 2574 wpi_notif_intr(sc); 2575 2576 if (r1 & WPI_INT_ALIVE) 2577 wakeup(sc); /* Firmware is alive. */ 2578 2579 if (r1 & WPI_INT_WAKEUP) 2580 wpi_wakeup_intr(sc); 2581 2582 done: 2583 /* Re-enable interrupts. 
*/ 2584 if (__predict_true(sc->sc_running)) 2585 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2586 2587 end: WPI_UNLOCK(sc); 2588 } 2589 2590 static void 2591 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) 2592 { 2593 struct wpi_tx_ring *ring; 2594 struct wpi_tx_data *data; 2595 uint8_t cur; 2596 2597 WPI_TXQ_LOCK(sc); 2598 ring = &sc->txq[ac]; 2599 2600 while (ring->pending != 0) { 2601 ring->pending--; 2602 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2603 data = &ring->data[cur]; 2604 2605 bus_dmamap_sync(ring->data_dmat, data->map, 2606 BUS_DMASYNC_POSTWRITE); 2607 bus_dmamap_unload(ring->data_dmat, data->map); 2608 m_freem(data->m); 2609 data->m = NULL; 2610 2611 ieee80211_node_decref(data->ni); 2612 data->ni = NULL; 2613 } 2614 2615 WPI_TXQ_UNLOCK(sc); 2616 } 2617 2618 static int 2619 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2620 { 2621 struct ieee80211_frame *wh; 2622 struct wpi_tx_cmd *cmd; 2623 struct wpi_tx_data *data; 2624 struct wpi_tx_desc *desc; 2625 struct wpi_tx_ring *ring; 2626 struct mbuf *m1; 2627 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2628 uint8_t cur, pad; 2629 uint16_t hdrlen; 2630 int error, i, nsegs, totlen, frag; 2631 2632 WPI_TXQ_LOCK(sc); 2633 2634 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2635 2636 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2637 2638 if (__predict_false(sc->sc_running == 0)) { 2639 /* wpi_stop() was called */ 2640 error = ENETDOWN; 2641 goto end; 2642 } 2643 2644 wh = mtod(buf->m, struct ieee80211_frame *); 2645 hdrlen = ieee80211_anyhdrsize(wh); 2646 totlen = buf->m->m_pkthdr.len; 2647 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); 2648 2649 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2650 error = EINVAL; 2651 goto end; 2652 } 2653 2654 if (hdrlen & 3) { 2655 /* First segment length must be a multiple of 4. */ 2656 pad = 4 - (hdrlen & 3); 2657 } else 2658 pad = 0; 2659 2660 ring = &sc->txq[buf->ac]; 2661 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2662 desc = &ring->desc[cur]; 2663 data = &ring->data[cur]; 2664 2665 /* Prepare TX firmware command. */ 2666 cmd = &ring->cmd[cur]; 2667 cmd->code = buf->code; 2668 cmd->flags = 0; 2669 cmd->qid = ring->qid; 2670 cmd->idx = cur; 2671 2672 memcpy(cmd->data, buf->data, buf->size); 2673 2674 /* Save and trim IEEE802.11 header. */ 2675 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2676 m_adj(buf->m, hdrlen); 2677 2678 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2679 segs, &nsegs, BUS_DMA_NOWAIT); 2680 if (error != 0 && error != EFBIG) { 2681 device_printf(sc->sc_dev, 2682 "%s: can't map mbuf (error %d)\n", __func__, error); 2683 goto end; 2684 } 2685 if (error != 0) { 2686 /* Too many DMA segments, linearize mbuf. */ 2687 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2688 if (m1 == NULL) { 2689 device_printf(sc->sc_dev, 2690 "%s: could not defrag mbuf\n", __func__); 2691 error = ENOBUFS; 2692 goto end; 2693 } 2694 buf->m = m1; 2695 2696 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2697 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2698 if (__predict_false(error != 0)) { 2699 /* XXX fix this (applicable to the iwn(4) too) */ 2700 /* 2701 * NB: Do not return error; 2702 * original mbuf does not exist anymore. 
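* (the chain was consumed by m_collapse() above, so report success to the
* caller and just account an output error below)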
2703 */ 2704 device_printf(sc->sc_dev, 2705 "%s: can't map mbuf (error %d)\n", __func__, 2706 error); 2707 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2708 if_inc_counter(buf->ni->ni_vap->iv_ifp, 2709 IFCOUNTER_OERRORS, 1); 2710 if (!frag) 2711 ieee80211_free_node(buf->ni); 2712 } 2713 m_freem(buf->m); 2714 error = 0; 2715 goto end; 2716 } 2717 } 2718 2719 KASSERT(nsegs < WPI_MAX_SCATTER, 2720 ("too many DMA segments, nsegs (%d) should be less than %d", 2721 nsegs, WPI_MAX_SCATTER)); 2722 2723 data->m = buf->m; 2724 data->ni = buf->ni; 2725 2726 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2727 __func__, ring->qid, cur, totlen, nsegs); 2728 2729 /* Fill TX descriptor. */ 2730 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2731 /* First DMA segment is used by the TX command. */ 2732 desc->segs[0].addr = htole32(data->cmd_paddr); 2733 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2734 /* Other DMA segments are for data payload. */ 2735 seg = &segs[0]; 2736 for (i = 1; i <= nsegs; i++) { 2737 desc->segs[i].addr = htole32(seg->ds_addr); 2738 desc->segs[i].len = htole32(seg->ds_len); 2739 seg++; 2740 } 2741 2742 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2743 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2744 BUS_DMASYNC_PREWRITE); 2745 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2746 BUS_DMASYNC_PREWRITE); 2747 2748 ring->pending += 1; 2749 2750 if (!frag) { 2751 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2752 WPI_TXQ_STATE_LOCK(sc); 2753 ring->queued += ring->pending; 2754 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, 2755 sc); 2756 WPI_TXQ_STATE_UNLOCK(sc); 2757 } 2758 2759 /* Kick TX ring. */ 2760 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2761 ring->pending = 0; 2762 sc->sc_update_tx_ring(sc, ring); 2763 } else 2764 ieee80211_node_incref(data->ni); 2765 2766 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 2767 __func__); 2768 2769 WPI_TXQ_UNLOCK(sc); 2770 2771 return (error); 2772 } 2773 2774 /* 2775 * Construct the data packet for a transmit buffer. 2776 */ 2777 static int 2778 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2779 { 2780 const struct ieee80211_txparam *tp; 2781 struct ieee80211vap *vap = ni->ni_vap; 2782 struct ieee80211com *ic = ni->ni_ic; 2783 struct wpi_node *wn = WPI_NODE(ni); 2784 struct ieee80211_channel *chan; 2785 struct ieee80211_frame *wh; 2786 struct ieee80211_key *k = NULL; 2787 struct wpi_buf tx_data; 2788 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2789 uint32_t flags; 2790 uint16_t ac, qos; 2791 uint8_t tid, type, rate; 2792 int swcrypt, ismcast, totlen; 2793 2794 wh = mtod(m, struct ieee80211_frame *); 2795 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2796 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2797 swcrypt = 1; 2798 2799 /* Select EDCA Access Category and TX ring for this frame. */ 2800 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2801 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2802 tid = qos & IEEE80211_QOS_TID; 2803 } else { 2804 qos = 0; 2805 tid = 0; 2806 } 2807 ac = M_WME_GETAC(m); 2808 2809 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2810 ni->ni_chan : ic->ic_curchan; 2811 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2812 2813 /* Choose a TX rate index. 
*/ 2814 if (type == IEEE80211_FC0_TYPE_MGT) 2815 rate = tp->mgmtrate; 2816 else if (ismcast) 2817 rate = tp->mcastrate; 2818 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2819 rate = tp->ucastrate; 2820 else if (m->m_flags & M_EAPOL) 2821 rate = tp->mgmtrate; 2822 else { 2823 /* XXX pass pktlen */ 2824 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2825 rate = ni->ni_txrate; 2826 } 2827 2828 /* Encrypt the frame if need be. */ 2829 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2830 /* Retrieve key for TX. */ 2831 k = ieee80211_crypto_encap(ni, m); 2832 if (k == NULL) 2833 return (ENOBUFS); 2834 2835 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2836 2837 /* 802.11 header may have moved. */ 2838 wh = mtod(m, struct ieee80211_frame *); 2839 } 2840 totlen = m->m_pkthdr.len; 2841 2842 if (ieee80211_radiotap_active_vap(vap)) { 2843 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2844 2845 tap->wt_flags = 0; 2846 tap->wt_rate = rate; 2847 if (k != NULL) 2848 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2849 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2850 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2851 2852 ieee80211_radiotap_tx(vap, m); 2853 } 2854 2855 flags = 0; 2856 if (!ismcast) { 2857 /* Unicast frame, check if an ACK is expected. */ 2858 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2859 IEEE80211_QOS_ACKPOLICY_NOACK) 2860 flags |= WPI_TX_NEED_ACK; 2861 } 2862 2863 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2864 flags |= WPI_TX_AUTO_SEQ; 2865 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2866 flags |= WPI_TX_MORE_FRAG; 2867 2868 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2869 if (!ismcast) { 2870 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2871 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2872 flags |= WPI_TX_NEED_RTS; 2873 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2874 WPI_RATE_IS_OFDM(rate)) { 2875 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2876 flags |= WPI_TX_NEED_CTS; 2877 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2878 flags |= WPI_TX_NEED_RTS; 2879 } 2880 2881 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2882 flags |= WPI_TX_FULL_TXOP; 2883 } 2884 2885 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2886 if (type == IEEE80211_FC0_TYPE_MGT) { 2887 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2888 2889 /* Tell HW to set timestamp in probe responses. 
*/ 2890 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2891 flags |= WPI_TX_INSERT_TSTAMP; 2892 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2893 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2894 tx->timeout = htole16(3); 2895 else 2896 tx->timeout = htole16(2); 2897 } 2898 2899 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2900 tx->id = WPI_ID_BROADCAST; 2901 else { 2902 if (wn->id == WPI_ID_UNDEFINED) { 2903 device_printf(sc->sc_dev, 2904 "%s: undefined node id\n", __func__); 2905 return (EINVAL); 2906 } 2907 2908 tx->id = wn->id; 2909 } 2910 2911 if (!swcrypt) { 2912 switch (k->wk_cipher->ic_cipher) { 2913 case IEEE80211_CIPHER_AES_CCM: 2914 tx->security = WPI_CIPHER_CCMP; 2915 break; 2916 2917 default: 2918 break; 2919 } 2920 2921 memcpy(tx->key, k->wk_key, k->wk_keylen); 2922 } 2923 2924 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 2925 struct mbuf *next = m->m_nextpkt; 2926 2927 tx->lnext = htole16(next->m_pkthdr.len); 2928 tx->fnext = htole32(tx->security | 2929 (flags & WPI_TX_NEED_ACK) | 2930 WPI_NEXT_STA_ID(tx->id)); 2931 } 2932 2933 tx->len = htole16(totlen); 2934 tx->flags = htole32(flags); 2935 tx->plcp = rate2plcp(rate); 2936 tx->tid = tid; 2937 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2938 tx->ofdm_mask = 0xff; 2939 tx->cck_mask = 0x0f; 2940 tx->rts_ntries = 7; 2941 tx->data_ntries = tp->maxretry; 2942 2943 tx_data.ni = ni; 2944 tx_data.m = m; 2945 tx_data.size = sizeof(struct wpi_cmd_data); 2946 tx_data.code = WPI_CMD_TX_DATA; 2947 tx_data.ac = ac; 2948 2949 return wpi_cmd2(sc, &tx_data); 2950 } 2951 2952 static int 2953 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2954 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2955 { 2956 struct ieee80211vap *vap = ni->ni_vap; 2957 struct ieee80211_key *k = NULL; 2958 struct ieee80211_frame *wh; 2959 struct wpi_buf tx_data; 2960 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2961 uint32_t flags; 2962 uint8_t ac, type, rate; 2963 int swcrypt, totlen; 2964 2965 wh = mtod(m, struct ieee80211_frame *); 2966 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2967 swcrypt = 1; 2968 2969 ac = params->ibp_pri & 3; 2970 2971 /* Choose a TX rate index. */ 2972 rate = params->ibp_rate0; 2973 2974 flags = 0; 2975 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2976 flags |= WPI_TX_AUTO_SEQ; 2977 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2978 flags |= WPI_TX_NEED_ACK; 2979 if (params->ibp_flags & IEEE80211_BPF_RTS) 2980 flags |= WPI_TX_NEED_RTS; 2981 if (params->ibp_flags & IEEE80211_BPF_CTS) 2982 flags |= WPI_TX_NEED_CTS; 2983 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2984 flags |= WPI_TX_FULL_TXOP; 2985 2986 /* Encrypt the frame if need be. */ 2987 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2988 /* Retrieve key for TX. */ 2989 k = ieee80211_crypto_encap(ni, m); 2990 if (k == NULL) 2991 return (ENOBUFS); 2992 2993 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2994 2995 /* 802.11 header may have moved. 
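ieee80211_crypto_encap() may have inserted IV space and shifted the mbuf data, so refetch the header pointer.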
*/ 2996 wh = mtod(m, struct ieee80211_frame *); 2997 } 2998 totlen = m->m_pkthdr.len; 2999 3000 if (ieee80211_radiotap_active_vap(vap)) { 3001 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 3002 3003 tap->wt_flags = 0; 3004 tap->wt_rate = rate; 3005 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 3006 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3007 3008 ieee80211_radiotap_tx(vap, m); 3009 } 3010 3011 memset(tx, 0, sizeof (struct wpi_cmd_data)); 3012 if (type == IEEE80211_FC0_TYPE_MGT) { 3013 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3014 3015 /* Tell HW to set timestamp in probe responses. */ 3016 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3017 flags |= WPI_TX_INSERT_TSTAMP; 3018 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3019 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3020 tx->timeout = htole16(3); 3021 else 3022 tx->timeout = htole16(2); 3023 } 3024 3025 if (!swcrypt) { 3026 switch (k->wk_cipher->ic_cipher) { 3027 case IEEE80211_CIPHER_AES_CCM: 3028 tx->security = WPI_CIPHER_CCMP; 3029 break; 3030 3031 default: 3032 break; 3033 } 3034 3035 memcpy(tx->key, k->wk_key, k->wk_keylen); 3036 } 3037 3038 tx->len = htole16(totlen); 3039 tx->flags = htole32(flags); 3040 tx->plcp = rate2plcp(rate); 3041 tx->id = WPI_ID_BROADCAST; 3042 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3043 tx->rts_ntries = params->ibp_try1; 3044 tx->data_ntries = params->ibp_try0; 3045 3046 tx_data.ni = ni; 3047 tx_data.m = m; 3048 tx_data.size = sizeof(struct wpi_cmd_data); 3049 tx_data.code = WPI_CMD_TX_DATA; 3050 tx_data.ac = ac; 3051 3052 return wpi_cmd2(sc, &tx_data); 3053 } 3054 3055 static __inline int 3056 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) 3057 { 3058 struct wpi_tx_ring *ring = &sc->txq[ac]; 3059 int retval; 3060 3061 WPI_TXQ_STATE_LOCK(sc); 3062 retval = WPI_TX_RING_HIMARK - ring->queued; 3063 WPI_TXQ_STATE_UNLOCK(sc); 3064 3065 return retval; 3066 } 3067 3068 static int 3069 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3070 const struct ieee80211_bpf_params *params) 3071 { 3072 struct ieee80211com *ic = ni->ni_ic; 3073 struct wpi_softc *sc = ic->ic_softc; 3074 uint16_t ac; 3075 int error = 0; 3076 3077 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3078 3079 ac = M_WME_GETAC(m); 3080 3081 WPI_TX_LOCK(sc); 3082 3083 /* NB: no fragments here */ 3084 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { 3085 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3086 goto unlock; 3087 } 3088 3089 if (params == NULL) { 3090 /* 3091 * Legacy path; interpret frame contents to decide 3092 * precisely how to send the frame. 3093 */ 3094 error = wpi_tx_data(sc, m, ni); 3095 } else { 3096 /* 3097 * Caller supplied explicit parameters to use in 3098 * sending the frame. 3099 */ 3100 error = wpi_tx_data_raw(sc, m, ni, params); 3101 } 3102 3103 unlock: WPI_TX_UNLOCK(sc); 3104 3105 if (error != 0) { 3106 m_freem(m); 3107 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3108 3109 return error; 3110 } 3111 3112 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3113 3114 return 0; 3115 } 3116 3117 static int 3118 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3119 { 3120 struct wpi_softc *sc = ic->ic_softc; 3121 struct ieee80211_node *ni; 3122 struct mbuf *mnext; 3123 uint16_t ac; 3124 int error, nmbufs; 3125 3126 WPI_TX_LOCK(sc); 3127 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3128 3129 /* Check if interface is up & running. 
*/ 3130 if (__predict_false(sc->sc_running == 0)) { 3131 error = ENXIO; 3132 goto unlock; 3133 } 3134 3135 nmbufs = 1; 3136 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) 3137 nmbufs++; 3138 3139 /* Check for available space. */ 3140 ac = M_WME_GETAC(m); 3141 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { 3142 error = ENOBUFS; 3143 goto unlock; 3144 } 3145 3146 error = 0; 3147 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3148 do { 3149 mnext = m->m_nextpkt; 3150 if (wpi_tx_data(sc, m, ni) != 0) { 3151 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 3152 nmbufs); 3153 wpi_free_txfrags(sc, ac); 3154 ieee80211_free_mbuf(m); 3155 ieee80211_free_node(ni); 3156 break; 3157 } 3158 } while((m = mnext) != NULL); 3159 3160 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3161 3162 unlock: WPI_TX_UNLOCK(sc); 3163 3164 return (error); 3165 } 3166 3167 static void 3168 wpi_watchdog_rfkill(void *arg) 3169 { 3170 struct wpi_softc *sc = arg; 3171 struct ieee80211com *ic = &sc->sc_ic; 3172 3173 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3174 3175 /* No need to lock firmware memory. */ 3176 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3177 /* Radio kill switch is still off. */ 3178 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3179 sc); 3180 } else 3181 ieee80211_runtask(ic, &sc->sc_radioon_task); 3182 } 3183 3184 static void 3185 wpi_scan_timeout(void *arg) 3186 { 3187 struct wpi_softc *sc = arg; 3188 struct ieee80211com *ic = &sc->sc_ic; 3189 3190 ic_printf(ic, "scan timeout\n"); 3191 ieee80211_restart_all(ic); 3192 } 3193 3194 static void 3195 wpi_tx_timeout(void *arg) 3196 { 3197 struct wpi_softc *sc = arg; 3198 struct ieee80211com *ic = &sc->sc_ic; 3199 3200 ic_printf(ic, "device timeout\n"); 3201 ieee80211_restart_all(ic); 3202 } 3203 3204 static void 3205 wpi_parent(struct ieee80211com *ic) 3206 { 3207 struct wpi_softc *sc = ic->ic_softc; 3208 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3209 3210 if (ic->ic_nrunning > 0) { 3211 if (wpi_init(sc) == 0) { 3212 ieee80211_notify_radio(ic, 1); 3213 ieee80211_start_all(ic); 3214 } else { 3215 ieee80211_notify_radio(ic, 0); 3216 ieee80211_stop(vap); 3217 } 3218 } else { 3219 ieee80211_notify_radio(ic, 0); 3220 wpi_stop(sc); 3221 } 3222 } 3223 3224 /* 3225 * Send a command to the firmware. 3226 */ 3227 static int 3228 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, 3229 int async) 3230 { 3231 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3232 struct wpi_tx_desc *desc; 3233 struct wpi_tx_data *data; 3234 struct wpi_tx_cmd *cmd; 3235 struct mbuf *m; 3236 bus_addr_t paddr; 3237 uint16_t totlen; 3238 int error; 3239 3240 WPI_TXQ_LOCK(sc); 3241 3242 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3243 3244 if (__predict_false(sc->sc_running == 0)) { 3245 /* wpi_stop() was called */ 3246 if (code == WPI_CMD_SCAN) 3247 error = ENETDOWN; 3248 else 3249 error = 0; 3250 3251 goto fail; 3252 } 3253 3254 if (async == 0) 3255 WPI_LOCK_ASSERT(sc); 3256 3257 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", 3258 __func__, wpi_cmd_str(code), size, async); 3259 3260 desc = &ring->desc[ring->cur]; 3261 data = &ring->data[ring->cur]; 3262 totlen = 4 + size; 3263 3264 if (size > sizeof cmd->data) { 3265 /* Command is too large to fit in a descriptor. 
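Commands bigger than MCLBYTES are rejected outright; otherwise the command is built in a separate jumbo-page mbuf that is DMA-mapped in place of the preallocated per-slot buffer.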
*/ 3266 if (totlen > MCLBYTES) { 3267 error = EINVAL; 3268 goto fail; 3269 } 3270 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3271 if (m == NULL) { 3272 error = ENOMEM; 3273 goto fail; 3274 } 3275 cmd = mtod(m, struct wpi_tx_cmd *); 3276 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3277 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3278 if (error != 0) { 3279 m_freem(m); 3280 goto fail; 3281 } 3282 data->m = m; 3283 } else { 3284 cmd = &ring->cmd[ring->cur]; 3285 paddr = data->cmd_paddr; 3286 } 3287 3288 cmd->code = code; 3289 cmd->flags = 0; 3290 cmd->qid = ring->qid; 3291 cmd->idx = ring->cur; 3292 memcpy(cmd->data, buf, size); 3293 3294 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3295 desc->segs[0].addr = htole32(paddr); 3296 desc->segs[0].len = htole32(totlen); 3297 3298 if (size > sizeof cmd->data) { 3299 bus_dmamap_sync(ring->data_dmat, data->map, 3300 BUS_DMASYNC_PREWRITE); 3301 } else { 3302 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3303 BUS_DMASYNC_PREWRITE); 3304 } 3305 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3306 BUS_DMASYNC_PREWRITE); 3307 3308 /* Kick command ring. */ 3309 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3310 sc->sc_update_tx_ring(sc, ring); 3311 3312 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3313 3314 WPI_TXQ_UNLOCK(sc); 3315 3316 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3317 3318 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3319 3320 WPI_TXQ_UNLOCK(sc); 3321 3322 return error; 3323 } 3324 3325 /* 3326 * Configure HW multi-rate retries. 3327 */ 3328 static int 3329 wpi_mrr_setup(struct wpi_softc *sc) 3330 { 3331 struct ieee80211com *ic = &sc->sc_ic; 3332 struct wpi_mrr_setup mrr; 3333 uint8_t i; 3334 int error; 3335 3336 /* CCK rates (not used with 802.11a). */ 3337 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3338 mrr.rates[i].flags = 0; 3339 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3340 /* Fallback to the immediate lower CCK rate (if any.) */ 3341 mrr.rates[i].next = 3342 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3343 /* Try twice at this rate before falling back to "next". */ 3344 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3345 } 3346 /* OFDM rates (not used with 802.11b). */ 3347 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3348 mrr.rates[i].flags = 0; 3349 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3350 /* Fallback to the immediate lower rate (if any.) */ 3351 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3352 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3353 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3354 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3355 i - 1; 3356 /* Try twice at this rate before falling back to "next". */ 3357 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3358 } 3359 /* Setup MRR for control frames. */ 3360 mrr.which = htole32(WPI_MRR_CTL); 3361 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3362 if (error != 0) { 3363 device_printf(sc->sc_dev, 3364 "could not setup MRR for control frames\n"); 3365 return error; 3366 } 3367 /* Setup MRR for data frames. 
*/ 3368 mrr.which = htole32(WPI_MRR_DATA); 3369 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3370 if (error != 0) { 3371 device_printf(sc->sc_dev, 3372 "could not setup MRR for data frames\n"); 3373 return error; 3374 } 3375 return 0; 3376 } 3377 3378 static int 3379 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3380 { 3381 struct ieee80211com *ic = ni->ni_ic; 3382 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3383 struct wpi_node *wn = WPI_NODE(ni); 3384 struct wpi_node_info node; 3385 int error; 3386 3387 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3388 3389 if (wn->id == WPI_ID_UNDEFINED) 3390 return EINVAL; 3391 3392 memset(&node, 0, sizeof node); 3393 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3394 node.id = wn->id; 3395 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3396 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3397 node.action = htole32(WPI_ACTION_SET_RATE); 3398 node.antenna = WPI_ANTENNA_BOTH; 3399 3400 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3401 wn->id, ether_sprintf(ni->ni_macaddr)); 3402 3403 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3404 if (error != 0) { 3405 device_printf(sc->sc_dev, 3406 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3407 error); 3408 return error; 3409 } 3410 3411 if (wvp->wv_gtk != 0) { 3412 error = wpi_set_global_keys(ni); 3413 if (error != 0) { 3414 device_printf(sc->sc_dev, 3415 "%s: error while setting global keys\n", __func__); 3416 return ENXIO; 3417 } 3418 } 3419 3420 return 0; 3421 } 3422 3423 /* 3424 * Broadcast node is used to send group-addressed and management frames. 3425 */ 3426 static int 3427 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3428 { 3429 struct ieee80211com *ic = &sc->sc_ic; 3430 struct wpi_node_info node; 3431 3432 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3433 3434 memset(&node, 0, sizeof node); 3435 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3436 node.id = WPI_ID_BROADCAST; 3437 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3438 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3439 node.action = htole32(WPI_ACTION_SET_RATE); 3440 node.antenna = WPI_ANTENNA_BOTH; 3441 3442 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3443 3444 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3445 } 3446 3447 static int 3448 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3449 { 3450 struct wpi_node *wn = WPI_NODE(ni); 3451 int error; 3452 3453 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3454 3455 wn->id = wpi_add_node_entry_sta(sc); 3456 3457 if ((error = wpi_add_node(sc, ni)) != 0) { 3458 wpi_del_node_entry(sc, wn->id); 3459 wn->id = WPI_ID_UNDEFINED; 3460 return error; 3461 } 3462 3463 return 0; 3464 } 3465 3466 static int 3467 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3468 { 3469 struct wpi_node *wn = WPI_NODE(ni); 3470 int error; 3471 3472 KASSERT(wn->id == WPI_ID_UNDEFINED, 3473 ("the node %d was added before", wn->id)); 3474 3475 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3476 3477 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3478 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3479 return ENOMEM; 3480 } 3481 3482 if ((error = wpi_add_node(sc, ni)) != 0) { 3483 wpi_del_node_entry(sc, wn->id); 3484 wn->id = WPI_ID_UNDEFINED; 3485 return error; 3486 } 3487 3488 return 0; 3489 } 3490 3491 static void 3492 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3493 { 3494 struct wpi_node *wn = WPI_NODE(ni); 3495 struct wpi_cmd_del_node node; 3496 int error; 3497 3498 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3499 3500 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3501 3502 memset(&node, 0, sizeof node); 3503 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3504 node.count = 1; 3505 3506 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3507 wn->id, ether_sprintf(ni->ni_macaddr)); 3508 3509 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3510 if (error != 0) { 3511 device_printf(sc->sc_dev, 3512 "%s: could not delete node %u, error %d\n", __func__, 3513 wn->id, error); 3514 } 3515 } 3516 3517 static int 3518 wpi_updateedca(struct ieee80211com *ic) 3519 { 3520 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3521 struct wpi_softc *sc = ic->ic_softc; 3522 struct wpi_edca_params cmd; 3523 int aci, error; 3524 3525 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3526 3527 memset(&cmd, 0, sizeof cmd); 3528 cmd.flags = htole32(WPI_EDCA_UPDATE); 3529 for (aci = 0; aci < WME_NUM_AC; aci++) { 3530 const struct wmeParams *ac = 3531 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3532 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3533 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3534 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3535 cmd.ac[aci].txoplimit = 3536 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3537 3538 DPRINTF(sc, WPI_DEBUG_EDCA, 3539 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3540 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3541 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3542 cmd.ac[aci].txoplimit); 3543 } 3544 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3545 3546 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3547 3548 return error; 3549 #undef WPI_EXP2 3550 } 3551 3552 static void 3553 wpi_set_promisc(struct wpi_softc *sc) 3554 { 3555 struct ieee80211com *ic = &sc->sc_ic; 3556 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3557 uint32_t promisc_filter; 3558 3559 promisc_filter = WPI_FILTER_CTL; 3560 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3561 promisc_filter |= WPI_FILTER_PROMISC; 3562 3563 if (ic->ic_promisc > 0) 3564 sc->rxon.filter |= htole32(promisc_filter); 3565 else 3566 sc->rxon.filter &= ~htole32(promisc_filter); 3567 } 3568 3569 static void 3570 wpi_update_promisc(struct ieee80211com *ic) 3571 { 3572 struct wpi_softc *sc = ic->ic_softc; 3573 3574 WPI_LOCK(sc); 3575 if (sc->sc_running == 0) { 3576 WPI_UNLOCK(sc); 3577 return; 3578 } 3579 WPI_UNLOCK(sc); 3580 3581 WPI_RXON_LOCK(sc); 3582 wpi_set_promisc(sc); 3583 3584 if (wpi_send_rxon(sc, 1, 1) != 0) { 3585 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3586 __func__); 3587 } 3588 WPI_RXON_UNLOCK(sc); 3589 } 3590 3591 static void 3592 wpi_update_mcast(struct ieee80211com *ic) 3593 { 3594 /* Ignore */ 3595 } 3596 3597 static void 3598 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3599 { 3600 struct wpi_cmd_led led; 3601 3602 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3603 3604 led.which = which; 3605 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3606 led.off = off; 3607 led.on = on; 3608 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3609 } 3610 3611 static int 3612 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3613 { 3614 struct wpi_cmd_timing cmd; 3615 uint64_t val, mod; 3616 3617 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3618 3619 memset(&cmd, 0, sizeof cmd); 3620 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3621 cmd.bintval = htole16(ni->ni_intval); 3622 cmd.lintval = htole16(10); 3623 3624 /* Compute remaining time until next beacon. */ 3625 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3626 mod = le64toh(cmd.tstamp) % val; 3627 cmd.binitval = htole32((uint32_t)(val - mod)); 3628 3629 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3630 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3631 3632 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3633 } 3634 3635 /* 3636 * This function is called periodically (every 60 seconds) to adjust output 3637 * power to temperature changes. 3638 */ 3639 static void 3640 wpi_power_calibration(struct wpi_softc *sc) 3641 { 3642 int temp; 3643 3644 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3645 3646 /* Update sensor data. */ 3647 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3648 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3649 3650 /* Sanity-check read value. */ 3651 if (temp < -260 || temp > 25) { 3652 /* This can't be correct, ignore. */ 3653 DPRINTF(sc, WPI_DEBUG_TEMP, 3654 "out-of-range temperature reported: %d\n", temp); 3655 return; 3656 } 3657 3658 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3659 3660 /* Adjust Tx power if need be. */ 3661 if (abs(temp - sc->temp) <= 6) 3662 return; 3663 3664 sc->temp = temp; 3665 3666 if (wpi_set_txpower(sc, 1) != 0) { 3667 /* just warn, too bad for the automatic calibration... */ 3668 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3669 } 3670 } 3671 3672 /* 3673 * Set TX power for current channel. 3674 */ 3675 static int 3676 wpi_set_txpower(struct wpi_softc *sc, int async) 3677 { 3678 struct wpi_power_group *group; 3679 struct wpi_cmd_txpower cmd; 3680 uint8_t chan; 3681 int idx, is_chan_5ghz, i; 3682 3683 /* Retrieve current channel from last RXON. 
*/ 3684 chan = sc->rxon.chan; 3685 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3686 3687 /* Find the TX power group to which this channel belongs. */ 3688 if (is_chan_5ghz) { 3689 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3690 if (chan <= group->chan) 3691 break; 3692 } else 3693 group = &sc->groups[0]; 3694 3695 memset(&cmd, 0, sizeof cmd); 3696 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3697 cmd.chan = htole16(chan); 3698 3699 /* Set TX power for all OFDM and CCK rates. */ 3700 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3701 /* Retrieve TX power for this channel/rate. */ 3702 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3703 3704 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3705 3706 if (is_chan_5ghz) { 3707 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3708 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3709 } else { 3710 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3711 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3712 } 3713 DPRINTF(sc, WPI_DEBUG_TEMP, 3714 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3715 } 3716 3717 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3718 } 3719 3720 /* 3721 * Determine Tx power index for a given channel/rate combination. 3722 * This takes into account the regulatory information from EEPROM and the 3723 * current temperature. 3724 */ 3725 static int 3726 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3727 uint8_t chan, int is_chan_5ghz, int ridx) 3728 { 3729 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3730 #define fdivround(a, b, n) \ 3731 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3732 3733 /* Linear interpolation. */ 3734 #define interpolate(x, x1, y1, x2, y2, n) \ 3735 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3736 3737 struct wpi_power_sample *sample; 3738 int pwr, idx; 3739 3740 /* Default TX power is group maximum TX power minus 3dB. */ 3741 pwr = group->maxpwr / 2; 3742 3743 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3744 switch (ridx) { 3745 case WPI_RIDX_OFDM36: 3746 pwr -= is_chan_5ghz ? 5 : 0; 3747 break; 3748 case WPI_RIDX_OFDM48: 3749 pwr -= is_chan_5ghz ? 10 : 7; 3750 break; 3751 case WPI_RIDX_OFDM54: 3752 pwr -= is_chan_5ghz ? 12 : 9; 3753 break; 3754 } 3755 3756 /* Never exceed the channel maximum allowed TX power. */ 3757 pwr = min(pwr, sc->maxpwr[chan]); 3758 3759 /* Retrieve TX power index into gain tables from samples. */ 3760 for (sample = group->samples; sample < &group->samples[3]; sample++) 3761 if (pwr > sample[1].power) 3762 break; 3763 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3764 idx = interpolate(pwr, sample[0].power, sample[0].index, 3765 sample[1].power, sample[1].index, 19); 3766 3767 /*- 3768 * Adjust power index based on current temperature: 3769 * - if cooler than factory-calibrated: decrease output power 3770 * - if warmer than factory-calibrated: increase output power 3771 */ 3772 idx -= (sc->temp - group->temp) * 11 / 100; 3773 3774 /* Decrease TX power for CCK rates (-5dB). */ 3775 if (ridx >= WPI_RIDX_CCK1) 3776 idx += 10; 3777 3778 /* Make sure idx stays in a valid range. */ 3779 if (idx < 0) 3780 return 0; 3781 if (idx > WPI_MAX_PWR_INDEX) 3782 return WPI_MAX_PWR_INDEX; 3783 return idx; 3784 3785 #undef interpolate 3786 #undef fdivround 3787 } 3788 3789 /* 3790 * Set STA mode power saving level (between 0 and 5). 
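* The (dtim, level) pair selects one of the wpi_pmgt parameter sets (RX/TX
* timeouts and sleep intervals) applied below.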
3791 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3792 */ 3793 static int 3794 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3795 { 3796 struct wpi_pmgt_cmd cmd; 3797 const struct wpi_pmgt *pmgt; 3798 uint32_t max, reg; 3799 uint8_t skip_dtim; 3800 int i; 3801 3802 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3803 "%s: dtim=%d, level=%d, async=%d\n", 3804 __func__, dtim, level, async); 3805 3806 /* Select which PS parameters to use. */ 3807 if (dtim <= 10) 3808 pmgt = &wpi_pmgt[0][level]; 3809 else 3810 pmgt = &wpi_pmgt[1][level]; 3811 3812 memset(&cmd, 0, sizeof cmd); 3813 if (level != 0) /* not CAM */ 3814 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3815 /* Retrieve PCIe Active State Power Management (ASPM). */ 3816 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 3817 if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */ 3818 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3819 3820 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3821 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3822 3823 if (dtim == 0) { 3824 dtim = 1; 3825 skip_dtim = 0; 3826 } else 3827 skip_dtim = pmgt->skip_dtim; 3828 3829 if (skip_dtim != 0) { 3830 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3831 max = pmgt->intval[4]; 3832 if (max == (uint32_t)-1) 3833 max = dtim * (skip_dtim + 1); 3834 else if (max > dtim) 3835 max = (max / dtim) * dtim; 3836 } else 3837 max = dtim; 3838 3839 for (i = 0; i < 5; i++) 3840 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3841 3842 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3843 } 3844 3845 static int 3846 wpi_send_btcoex(struct wpi_softc *sc) 3847 { 3848 struct wpi_bluetooth cmd; 3849 3850 memset(&cmd, 0, sizeof cmd); 3851 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3852 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3853 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3854 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3855 __func__); 3856 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3857 } 3858 3859 static int 3860 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3861 { 3862 int error; 3863 3864 if (async) 3865 WPI_RXON_LOCK_ASSERT(sc); 3866 3867 if (assoc && wpi_check_bss_filter(sc) != 0) { 3868 struct wpi_assoc rxon_assoc; 3869 3870 rxon_assoc.flags = sc->rxon.flags; 3871 rxon_assoc.filter = sc->rxon.filter; 3872 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3873 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3874 rxon_assoc.reserved = 0; 3875 3876 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3877 sizeof (struct wpi_assoc), async); 3878 if (error != 0) { 3879 device_printf(sc->sc_dev, 3880 "RXON_ASSOC command failed, error %d\n", error); 3881 return error; 3882 } 3883 } else { 3884 if (async) { 3885 WPI_NT_LOCK(sc); 3886 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3887 sizeof (struct wpi_rxon), async); 3888 if (error == 0) 3889 wpi_clear_node_table(sc); 3890 WPI_NT_UNLOCK(sc); 3891 } else { 3892 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3893 sizeof (struct wpi_rxon), async); 3894 if (error == 0) 3895 wpi_clear_node_table(sc); 3896 } 3897 3898 if (error != 0) { 3899 device_printf(sc->sc_dev, 3900 "RXON command failed, error %d\n", error); 3901 return error; 3902 } 3903 3904 /* Add broadcast node. 
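The RXON above cleared the firmware node table, so the broadcast entry used for group-addressed and management frames must be re-added before those can be sent again.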
*/ 3905 error = wpi_add_broadcast_node(sc, async); 3906 if (error != 0) { 3907 device_printf(sc->sc_dev, 3908 "could not add broadcast node, error %d\n", error); 3909 return error; 3910 } 3911 } 3912 3913 /* Configuration has changed, set Tx power accordingly. */ 3914 if ((error = wpi_set_txpower(sc, async)) != 0) { 3915 device_printf(sc->sc_dev, 3916 "%s: could not set TX power, error %d\n", __func__, error); 3917 return error; 3918 } 3919 3920 return 0; 3921 } 3922 3923 /** 3924 * Configure the card to listen to a particular channel; this transitions the 3925 * card into being able to receive frames from remote devices. 3926 */ 3927 static int 3928 wpi_config(struct wpi_softc *sc) 3929 { 3930 struct ieee80211com *ic = &sc->sc_ic; 3931 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3932 struct ieee80211_channel *c = ic->ic_curchan; 3933 int error; 3934 3935 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3936 3937 /* Set power saving level to CAM during initialization. */ 3938 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3939 device_printf(sc->sc_dev, 3940 "%s: could not set power saving level\n", __func__); 3941 return error; 3942 } 3943 3944 /* Configure bluetooth coexistence. */ 3945 if ((error = wpi_send_btcoex(sc)) != 0) { 3946 device_printf(sc->sc_dev, 3947 "could not configure bluetooth coexistence\n"); 3948 return error; 3949 } 3950 3951 /* Configure adapter. */ 3952 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3953 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3954 3955 /* Set default channel. */ 3956 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3957 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3958 if (IEEE80211_IS_CHAN_2GHZ(c)) 3959 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3960 3961 sc->rxon.filter = WPI_FILTER_MULTICAST; 3962 switch (ic->ic_opmode) { 3963 case IEEE80211_M_STA: 3964 sc->rxon.mode = WPI_MODE_STA; 3965 break; 3966 case IEEE80211_M_IBSS: 3967 sc->rxon.mode = WPI_MODE_IBSS; 3968 sc->rxon.filter |= WPI_FILTER_BEACON; 3969 break; 3970 case IEEE80211_M_HOSTAP: 3971 /* XXX workaround for beaconing */ 3972 sc->rxon.mode = WPI_MODE_IBSS; 3973 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3974 break; 3975 case IEEE80211_M_AHDEMO: 3976 sc->rxon.mode = WPI_MODE_HOSTAP; 3977 break; 3978 case IEEE80211_M_MONITOR: 3979 sc->rxon.mode = WPI_MODE_MONITOR; 3980 break; 3981 default: 3982 device_printf(sc->sc_dev, "unknown opmode %d\n", 3983 ic->ic_opmode); 3984 return EINVAL; 3985 } 3986 sc->rxon.filter = htole32(sc->rxon.filter); 3987 wpi_set_promisc(sc); 3988 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3989 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3990 3991 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3992 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3993 __func__); 3994 return error; 3995 } 3996 3997 /* Set up rate scaling. */ 3998 if ((error = wpi_mrr_setup(sc)) != 0) { 3999 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 4000 error); 4001 return error; 4002 } 4003 4004 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4005 4006 return 0; 4007 } 4008 4009 static uint16_t 4010 wpi_get_active_dwell_time(struct wpi_softc *sc, 4011 struct ieee80211_channel *c, uint8_t n_probes) 4012 { 4013 /* No channel? Default to 2GHz settings. */ 4014 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 4015 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 4016 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 4017 } 4018 4019 /* 5GHz dwell time.
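Same formula as for 2GHz; e.g. a directed scan with n_probes == 2 would dwell for WPI_ACTIVE_DWELL_TIME_5GHZ + 3 * WPI_ACTIVE_DWELL_FACTOR_5GHZ.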
*/ 4020 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4021 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4022 } 4023 4024 /* 4025 * Limit the total dwell time. 4026 * 4027 * Returns the dwell time in milliseconds. 4028 */ 4029 static uint16_t 4030 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4031 { 4032 struct ieee80211com *ic = &sc->sc_ic; 4033 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4034 uint16_t bintval = 0; 4035 4036 /* bintval is in TU (1.024mS) */ 4037 if (vap != NULL) 4038 bintval = vap->iv_bss->ni_intval; 4039 4040 /* 4041 * If it's non-zero, we should calculate the minimum of 4042 * it and the DWELL_BASE. 4043 * 4044 * XXX Yes, the math should take into account that bintval 4045 * is 1.024mS, not 1mS.. 4046 */ 4047 if (bintval > 0) { 4048 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4049 bintval); 4050 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4051 } 4052 4053 /* No association context? Default. */ 4054 return dwell_time; 4055 } 4056 4057 static uint16_t 4058 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4059 { 4060 uint16_t passive; 4061 4062 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4063 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4064 else 4065 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4066 4067 /* Clamp to the beacon interval if we're associated. */ 4068 return (wpi_limit_dwell(sc, passive)); 4069 } 4070 4071 static uint32_t 4072 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4073 { 4074 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4075 uint32_t nbeacons = time / bintval; 4076 4077 if (mod > WPI_PAUSE_MAX_TIME) 4078 mod = WPI_PAUSE_MAX_TIME; 4079 4080 return WPI_PAUSE_SCAN(nbeacons, mod); 4081 } 4082 4083 /* 4084 * Send a scan request to the firmware. 4085 */ 4086 static int 4087 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4088 { 4089 struct ieee80211com *ic = &sc->sc_ic; 4090 struct ieee80211_scan_state *ss = ic->ic_scan; 4091 struct ieee80211vap *vap = ss->ss_vap; 4092 struct wpi_scan_hdr *hdr; 4093 struct wpi_cmd_data *tx; 4094 struct wpi_scan_essid *essids; 4095 struct wpi_scan_chan *chan; 4096 struct ieee80211_frame *wh; 4097 struct ieee80211_rateset *rs; 4098 uint16_t bintval, buflen, dwell_active, dwell_passive; 4099 uint8_t *buf, *frm, i, nssid; 4100 int bgscan, error; 4101 4102 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4103 4104 /* 4105 * We are absolutely not allowed to send a scan command when another 4106 * scan command is pending. 4107 */ 4108 if (callout_pending(&sc->scan_timeout)) { 4109 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4110 __func__); 4111 error = EAGAIN; 4112 goto fail; 4113 } 4114 4115 bgscan = wpi_check_bss_filter(sc); 4116 bintval = vap->iv_bss->ni_intval; 4117 if (bgscan != 0 && 4118 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4119 error = EOPNOTSUPP; 4120 goto fail; 4121 } 4122 4123 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4124 if (buf == NULL) { 4125 device_printf(sc->sc_dev, 4126 "%s: could not allocate buffer for scan command\n", 4127 __func__); 4128 error = ENOMEM; 4129 goto fail; 4130 } 4131 hdr = (struct wpi_scan_hdr *)buf; 4132 4133 /* 4134 * Move to the next channel if no packets are received within 10 msecs 4135 * after sending the probe request. 
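quiet_time sets that window and quiet_threshold (one frame here) is the minimum number of frames that must be received within it to stay on the channel.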
4136 */ 4137 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4138 hdr->quiet_threshold = htole16(1); 4139 4140 if (bgscan != 0) { 4141 /* 4142 * Max needs to be greater than active and passive and quiet! 4143 * It's also in microseconds! 4144 */ 4145 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4146 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4147 bintval)); 4148 } 4149 4150 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4151 4152 tx = (struct wpi_cmd_data *)(hdr + 1); 4153 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4154 tx->id = WPI_ID_BROADCAST; 4155 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4156 4157 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4158 /* Send probe requests at 6Mbps. */ 4159 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4160 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4161 } else { 4162 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4163 /* Send probe requests at 1Mbps. */ 4164 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4165 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4166 } 4167 4168 essids = (struct wpi_scan_essid *)(tx + 1); 4169 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4170 for (i = 0; i < nssid; i++) { 4171 essids[i].id = IEEE80211_ELEMID_SSID; 4172 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4173 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4174 #ifdef WPI_DEBUG 4175 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4176 printf("Scanning Essid: "); 4177 ieee80211_print_essid(essids[i].data, essids[i].len); 4178 printf("\n"); 4179 } 4180 #endif 4181 } 4182 4183 /* 4184 * Build a probe request frame. Most of the following code is a 4185 * copy & paste of what is done in net80211. 4186 */ 4187 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4188 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4189 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4190 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4191 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4192 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4193 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4194 4195 frm = (uint8_t *)(wh + 1); 4196 frm = ieee80211_add_ssid(frm, NULL, 0); 4197 frm = ieee80211_add_rates(frm, rs); 4198 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4199 frm = ieee80211_add_xrates(frm, rs); 4200 4201 /* Set length of probe request. */ 4202 tx->len = htole16(frm - (uint8_t *)wh); 4203 4204 /* 4205 * Construct information about the channel that we 4206 * want to scan. The firmware expects this to be directly 4207 * after the scan probe request 4208 */ 4209 chan = (struct wpi_scan_chan *)frm; 4210 chan->chan = ieee80211_chan2ieee(ic, c); 4211 chan->flags = 0; 4212 if (nssid) { 4213 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4214 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4215 } else 4216 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4217 4218 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4219 chan->flags |= WPI_CHAN_ACTIVE; 4220 4221 /* 4222 * Calculate the active/passive dwell times. 4223 */ 4224 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4225 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4226 4227 /* Make sure they're valid. 
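The active dwell must never exceed the passive dwell, so clamp it in case the per-probe scaling above pushed it higher.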
*/ 4228 if (dwell_active > dwell_passive) 4229 dwell_active = dwell_passive; 4230 4231 chan->active = htole16(dwell_active); 4232 chan->passive = htole16(dwell_passive); 4233 4234 chan->dsp_gain = 0x6e; /* Default level */ 4235 4236 if (IEEE80211_IS_CHAN_5GHZ(c)) 4237 chan->rf_gain = 0x3b; 4238 else 4239 chan->rf_gain = 0x28; 4240 4241 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4242 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4243 4244 hdr->nchan++; 4245 4246 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4247 /* XXX Force probe request transmission. */ 4248 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4249 4250 chan++; 4251 4252 /* Reduce unnecessary delay. */ 4253 chan->flags = 0; 4254 chan->passive = chan->active = hdr->quiet_time; 4255 4256 hdr->nchan++; 4257 } 4258 4259 chan++; 4260 4261 buflen = (uint8_t *)chan - buf; 4262 hdr->len = htole16(buflen); 4263 4264 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4265 hdr->nchan); 4266 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4267 free(buf, M_DEVBUF); 4268 4269 if (error != 0) 4270 goto fail; 4271 4272 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4273 4274 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4275 4276 return 0; 4277 4278 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4279 4280 return error; 4281 } 4282 4283 static int 4284 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4285 { 4286 struct ieee80211com *ic = vap->iv_ic; 4287 struct ieee80211_node *ni = vap->iv_bss; 4288 struct ieee80211_channel *c = ni->ni_chan; 4289 int error; 4290 4291 WPI_RXON_LOCK(sc); 4292 4293 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4294 4295 /* Update adapter configuration. */ 4296 sc->rxon.associd = 0; 4297 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4298 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4299 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4300 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4301 if (IEEE80211_IS_CHAN_2GHZ(c)) 4302 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4303 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4304 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4305 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4306 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4307 if (IEEE80211_IS_CHAN_A(c)) { 4308 sc->rxon.cck_mask = 0; 4309 sc->rxon.ofdm_mask = 0x15; 4310 } else if (IEEE80211_IS_CHAN_B(c)) { 4311 sc->rxon.cck_mask = 0x03; 4312 sc->rxon.ofdm_mask = 0; 4313 } else { 4314 /* Assume 802.11b/g. 
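Mask 0x0f enables all CCK rates, 0x15 the basic OFDM rates.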
*/ 4315 sc->rxon.cck_mask = 0x0f; 4316 sc->rxon.ofdm_mask = 0x15; 4317 } 4318 4319 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4320 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4321 sc->rxon.ofdm_mask); 4322 4323 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4324 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4325 __func__); 4326 } 4327 4328 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4329 4330 WPI_RXON_UNLOCK(sc); 4331 4332 return error; 4333 } 4334 4335 static int 4336 wpi_config_beacon(struct wpi_vap *wvp) 4337 { 4338 struct ieee80211vap *vap = &wvp->wv_vap; 4339 struct ieee80211com *ic = vap->iv_ic; 4340 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4341 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4342 struct wpi_softc *sc = ic->ic_softc; 4343 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4344 struct ieee80211_tim_ie *tie; 4345 struct mbuf *m; 4346 uint8_t *ptr; 4347 int error; 4348 4349 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4350 4351 WPI_VAP_LOCK_ASSERT(wvp); 4352 4353 cmd->len = htole16(bcn->m->m_pkthdr.len); 4354 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4355 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4356 4357 /* XXX seems to be unused */ 4358 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4359 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4360 ptr = mtod(bcn->m, uint8_t *); 4361 4362 cmd->tim = htole16(bo->bo_tim - ptr); 4363 cmd->timsz = tie->tim_len; 4364 } 4365 4366 /* Necessary for recursion in ieee80211_beacon_update(). */ 4367 m = bcn->m; 4368 bcn->m = m_dup(m, M_NOWAIT); 4369 if (bcn->m == NULL) { 4370 device_printf(sc->sc_dev, 4371 "%s: could not copy beacon frame\n", __func__); 4372 error = ENOMEM; 4373 goto end; 4374 } 4375 4376 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4377 device_printf(sc->sc_dev, 4378 "%s: could not update beacon frame, error %d", __func__, 4379 error); 4380 m_freem(bcn->m); 4381 } 4382 4383 /* Restore mbuf. 
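The command above was given a duplicate made with m_dup(); put the original beacon mbuf back so later beacon updates keep operating on it.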
*/ 4384 end: bcn->m = m; 4385 4386 return error; 4387 } 4388 4389 static int 4390 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4391 { 4392 struct ieee80211vap *vap = ni->ni_vap; 4393 struct wpi_vap *wvp = WPI_VAP(vap); 4394 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4395 struct mbuf *m; 4396 int error; 4397 4398 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4399 4400 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4401 return EINVAL; 4402 4403 m = ieee80211_beacon_alloc(ni); 4404 if (m == NULL) { 4405 device_printf(sc->sc_dev, 4406 "%s: could not allocate beacon frame\n", __func__); 4407 return ENOMEM; 4408 } 4409 4410 WPI_VAP_LOCK(wvp); 4411 if (bcn->m != NULL) 4412 m_freem(bcn->m); 4413 4414 bcn->m = m; 4415 4416 error = wpi_config_beacon(wvp); 4417 WPI_VAP_UNLOCK(wvp); 4418 4419 return error; 4420 } 4421 4422 static void 4423 wpi_update_beacon(struct ieee80211vap *vap, int item) 4424 { 4425 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4426 struct wpi_vap *wvp = WPI_VAP(vap); 4427 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4428 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4429 struct ieee80211_node *ni = vap->iv_bss; 4430 int mcast = 0; 4431 4432 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4433 4434 WPI_VAP_LOCK(wvp); 4435 if (bcn->m == NULL) { 4436 bcn->m = ieee80211_beacon_alloc(ni); 4437 if (bcn->m == NULL) { 4438 device_printf(sc->sc_dev, 4439 "%s: could not allocate beacon frame\n", __func__); 4440 4441 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4442 __func__); 4443 4444 WPI_VAP_UNLOCK(wvp); 4445 return; 4446 } 4447 } 4448 WPI_VAP_UNLOCK(wvp); 4449 4450 if (item == IEEE80211_BEACON_TIM) 4451 mcast = 1; /* TODO */ 4452 4453 setbit(bo->bo_flags, item); 4454 ieee80211_beacon_update(ni, bcn->m, mcast); 4455 4456 WPI_VAP_LOCK(wvp); 4457 wpi_config_beacon(wvp); 4458 WPI_VAP_UNLOCK(wvp); 4459 4460 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4461 } 4462 4463 static void 4464 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4465 { 4466 struct ieee80211vap *vap = ni->ni_vap; 4467 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4468 struct wpi_node *wn = WPI_NODE(ni); 4469 int error; 4470 4471 WPI_NT_LOCK(sc); 4472 4473 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4474 4475 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4476 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4477 device_printf(sc->sc_dev, 4478 "%s: could not add IBSS node, error %d\n", 4479 __func__, error); 4480 } 4481 } 4482 WPI_NT_UNLOCK(sc); 4483 } 4484 4485 static int 4486 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4487 { 4488 struct ieee80211com *ic = vap->iv_ic; 4489 struct ieee80211_node *ni = vap->iv_bss; 4490 struct ieee80211_channel *c = ni->ni_chan; 4491 int error; 4492 4493 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4494 4495 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4496 /* Link LED blinks while monitoring. */ 4497 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4498 return 0; 4499 } 4500 4501 /* XXX kernel panic workaround */ 4502 if (c == IEEE80211_CHAN_ANYC) { 4503 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4504 __func__); 4505 return EINVAL; 4506 } 4507 4508 if ((error = wpi_set_timing(sc, ni)) != 0) { 4509 device_printf(sc->sc_dev, 4510 "%s: could not set timing, error %d\n", __func__, error); 4511 return error; 4512 } 4513 4514 /* Update adapter configuration. 
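This mirrors the RXON setup in wpi_auth(), but now also carries the association ID and enables the BSS filter.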
*/ 4515 WPI_RXON_LOCK(sc); 4516 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4517 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4518 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4519 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4520 if (IEEE80211_IS_CHAN_2GHZ(c)) 4521 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4522 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4523 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4524 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4525 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4526 if (IEEE80211_IS_CHAN_A(c)) { 4527 sc->rxon.cck_mask = 0; 4528 sc->rxon.ofdm_mask = 0x15; 4529 } else if (IEEE80211_IS_CHAN_B(c)) { 4530 sc->rxon.cck_mask = 0x03; 4531 sc->rxon.ofdm_mask = 0; 4532 } else { 4533 /* Assume 802.11b/g. */ 4534 sc->rxon.cck_mask = 0x0f; 4535 sc->rxon.ofdm_mask = 0x15; 4536 } 4537 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4538 4539 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4540 sc->rxon.chan, sc->rxon.flags); 4541 4542 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4543 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4544 __func__); 4545 return error; 4546 } 4547 4548 /* Start periodic calibration timer. */ 4549 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4550 4551 WPI_RXON_UNLOCK(sc); 4552 4553 if (vap->iv_opmode == IEEE80211_M_IBSS || 4554 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4555 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4556 device_printf(sc->sc_dev, 4557 "%s: could not setup beacon, error %d\n", __func__, 4558 error); 4559 return error; 4560 } 4561 } 4562 4563 if (vap->iv_opmode == IEEE80211_M_STA) { 4564 /* Add BSS node. */ 4565 WPI_NT_LOCK(sc); 4566 error = wpi_add_sta_node(sc, ni); 4567 WPI_NT_UNLOCK(sc); 4568 if (error != 0) { 4569 device_printf(sc->sc_dev, 4570 "%s: could not add BSS node, error %d\n", __func__, 4571 error); 4572 return error; 4573 } 4574 } 4575 4576 /* Link LED always on while associated. */ 4577 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4578 4579 /* Enable power-saving mode if requested by user. 
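Level 3 is a middle setting; 0 is CAM and 5 is maximum saving (see wpi_set_pslevel()).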
*/ 4580 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4581 vap->iv_opmode != IEEE80211_M_IBSS) 4582 (void)wpi_set_pslevel(sc, 0, 3, 1); 4583 4584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4585 4586 return 0; 4587 } 4588 4589 static int 4590 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4591 { 4592 const struct ieee80211_cipher *cip = k->wk_cipher; 4593 struct ieee80211vap *vap = ni->ni_vap; 4594 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4595 struct wpi_node *wn = WPI_NODE(ni); 4596 struct wpi_node_info node; 4597 uint16_t kflags; 4598 int error; 4599 4600 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4601 4602 if (wpi_check_node_entry(sc, wn->id) == 0) { 4603 device_printf(sc->sc_dev, "%s: node does not exist\n", 4604 __func__); 4605 return 0; 4606 } 4607 4608 switch (cip->ic_cipher) { 4609 case IEEE80211_CIPHER_AES_CCM: 4610 kflags = WPI_KFLAG_CCMP; 4611 break; 4612 4613 default: 4614 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4615 cip->ic_cipher); 4616 return 0; 4617 } 4618 4619 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4620 if (k->wk_flags & IEEE80211_KEY_GROUP) 4621 kflags |= WPI_KFLAG_MULTICAST; 4622 4623 memset(&node, 0, sizeof node); 4624 node.id = wn->id; 4625 node.control = WPI_NODE_UPDATE; 4626 node.flags = WPI_FLAG_KEY_SET; 4627 node.kflags = htole16(kflags); 4628 memcpy(node.key, k->wk_key, k->wk_keylen); 4629 again: 4630 DPRINTF(sc, WPI_DEBUG_KEY, 4631 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4632 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4633 node.id, ether_sprintf(ni->ni_macaddr)); 4634 4635 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4636 if (error != 0) { 4637 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4638 error); 4639 return !error; 4640 } 4641 4642 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4643 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4644 kflags |= WPI_KFLAG_MULTICAST; 4645 node.kflags = htole16(kflags); 4646 4647 goto again; 4648 } 4649 4650 return 1; 4651 } 4652 4653 static void 4654 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4655 { 4656 const struct ieee80211_key *k = arg; 4657 struct ieee80211vap *vap = ni->ni_vap; 4658 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4659 struct wpi_node *wn = WPI_NODE(ni); 4660 int error; 4661 4662 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4663 return; 4664 4665 WPI_NT_LOCK(sc); 4666 error = wpi_load_key(ni, k); 4667 WPI_NT_UNLOCK(sc); 4668 4669 if (error == 0) { 4670 device_printf(sc->sc_dev, "%s: error while setting key\n", 4671 __func__); 4672 } 4673 } 4674 4675 static int 4676 wpi_set_global_keys(struct ieee80211_node *ni) 4677 { 4678 struct ieee80211vap *vap = ni->ni_vap; 4679 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4680 int error = 1; 4681 4682 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4683 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4684 error = wpi_load_key(ni, wk); 4685 4686 return !error; 4687 } 4688 4689 static int 4690 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4691 { 4692 struct ieee80211vap *vap = ni->ni_vap; 4693 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4694 struct wpi_node *wn = WPI_NODE(ni); 4695 struct wpi_node_info node; 4696 uint16_t kflags; 4697 int error; 4698 4699 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4700 4701 if (wpi_check_node_entry(sc, wn->id) == 0) { 4702 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4703 return 1; /* 
Nothing to do. */ 4704 } 4705 4706 kflags = WPI_KFLAG_KID(k->wk_keyix); 4707 if (k->wk_flags & IEEE80211_KEY_GROUP) 4708 kflags |= WPI_KFLAG_MULTICAST; 4709 4710 memset(&node, 0, sizeof node); 4711 node.id = wn->id; 4712 node.control = WPI_NODE_UPDATE; 4713 node.flags = WPI_FLAG_KEY_SET; 4714 node.kflags = htole16(kflags); 4715 again: 4716 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4717 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4718 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4719 4720 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4721 if (error != 0) { 4722 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4723 error); 4724 return !error; 4725 } 4726 4727 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4728 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4729 kflags |= WPI_KFLAG_MULTICAST; 4730 node.kflags = htole16(kflags); 4731 4732 goto again; 4733 } 4734 4735 return 1; 4736 } 4737 4738 static void 4739 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4740 { 4741 const struct ieee80211_key *k = arg; 4742 struct ieee80211vap *vap = ni->ni_vap; 4743 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4744 struct wpi_node *wn = WPI_NODE(ni); 4745 int error; 4746 4747 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4748 return; 4749 4750 WPI_NT_LOCK(sc); 4751 error = wpi_del_key(ni, k); 4752 WPI_NT_UNLOCK(sc); 4753 4754 if (error == 0) { 4755 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4756 __func__); 4757 } 4758 } 4759 4760 static int 4761 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4762 int set) 4763 { 4764 struct ieee80211com *ic = vap->iv_ic; 4765 struct wpi_softc *sc = ic->ic_softc; 4766 struct wpi_vap *wvp = WPI_VAP(vap); 4767 struct ieee80211_node *ni; 4768 int error, ni_ref = 0; 4769 4770 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4771 4772 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4773 /* Not for us. */ 4774 return 1; 4775 } 4776 4777 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4778 /* XMIT keys are handled in wpi_tx_data(). */ 4779 return 1; 4780 } 4781 4782 /* Handle group keys. */ 4783 if (&vap->iv_nw_keys[0] <= k && 4784 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4785 WPI_NT_LOCK(sc); 4786 if (set) 4787 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4788 else 4789 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4790 WPI_NT_UNLOCK(sc); 4791 4792 if (vap->iv_state == IEEE80211_S_RUN) { 4793 ieee80211_iterate_nodes(&ic->ic_sta, 4794 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4795 __DECONST(void *, k)); 4796 } 4797 4798 return 1; 4799 } 4800 4801 switch (vap->iv_opmode) { 4802 case IEEE80211_M_STA: 4803 ni = vap->iv_bss; 4804 break; 4805 4806 case IEEE80211_M_IBSS: 4807 case IEEE80211_M_AHDEMO: 4808 case IEEE80211_M_HOSTAP: 4809 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4810 if (ni == NULL) 4811 return 0; /* should not happen */ 4812 4813 ni_ref = 1; 4814 break; 4815 4816 default: 4817 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4818 vap->iv_opmode); 4819 return 0; 4820 } 4821 4822 WPI_NT_LOCK(sc); 4823 if (set) 4824 error = wpi_load_key(ni, k); 4825 else 4826 error = wpi_del_key(ni, k); 4827 WPI_NT_UNLOCK(sc); 4828 4829 if (ni_ref) 4830 ieee80211_node_decref(ni); 4831 4832 return error; 4833 } 4834 4835 static int 4836 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4837 { 4838 return wpi_process_key(vap, k, 1); 4839 } 4840 4841 static int 4842 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4843 { 4844 return wpi_process_key(vap, k, 0); 4845 } 4846 4847 /* 4848 * This function is called after the runtime firmware notifies us of its 4849 * readiness (called in a process context). 4850 */ 4851 static int 4852 wpi_post_alive(struct wpi_softc *sc) 4853 { 4854 int ntries, error; 4855 4856 /* Check (again) that the radio is not disabled. */ 4857 if ((error = wpi_nic_lock(sc)) != 0) 4858 return error; 4859 4860 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4861 4862 /* NB: Runtime firmware must be up and running. */ 4863 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4864 device_printf(sc->sc_dev, 4865 "RF switch: radio disabled (%s)\n", __func__); 4866 wpi_nic_unlock(sc); 4867 return EPERM; /* :-) */ 4868 } 4869 wpi_nic_unlock(sc); 4870 4871 /* Wait for thermal sensor to calibrate. */ 4872 for (ntries = 0; ntries < 1000; ntries++) { 4873 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4874 break; 4875 DELAY(10); 4876 } 4877 4878 if (ntries == 1000) { 4879 device_printf(sc->sc_dev, 4880 "timeout waiting for thermal sensor calibration\n"); 4881 return ETIMEDOUT; 4882 } 4883 4884 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4885 return 0; 4886 } 4887 4888 /* 4889 * The firmware boot code is small and is intended to be copied directly into 4890 * the NIC internal memory (no DMA transfer). 4891 */ 4892 static int 4893 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) 4894 { 4895 int error, ntries; 4896 4897 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4898 4899 size /= sizeof (uint32_t); 4900 4901 if ((error = wpi_nic_lock(sc)) != 0) 4902 return error; 4903 4904 /* Copy microcode image into NIC memory. */ 4905 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4906 (const uint32_t *)ucode, size); 4907 4908 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4909 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4910 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4911 4912 /* Start boot load now. */ 4913 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4914 4915 /* Wait for transfer to complete. */ 4916 for (ntries = 0; ntries < 1000; ntries++) { 4917 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4918 DPRINTF(sc, WPI_DEBUG_HW, 4919 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4920 WPI_FH_TX_STATUS_IDLE(6), 4921 status & WPI_FH_TX_STATUS_IDLE(6)); 4922 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4923 DPRINTF(sc, WPI_DEBUG_HW, 4924 "Status Match! 
- ntries = %d\n", ntries); 4925 break; 4926 } 4927 DELAY(10); 4928 } 4929 if (ntries == 1000) { 4930 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4931 __func__); 4932 wpi_nic_unlock(sc); 4933 return ETIMEDOUT; 4934 } 4935 4936 /* Enable boot after power up. */ 4937 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4938 4939 wpi_nic_unlock(sc); 4940 return 0; 4941 } 4942 4943 static int 4944 wpi_load_firmware(struct wpi_softc *sc) 4945 { 4946 struct wpi_fw_info *fw = &sc->fw; 4947 struct wpi_dma_info *dma = &sc->fw_dma; 4948 int error; 4949 4950 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4951 4952 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4953 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4954 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4955 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4956 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4957 4958 /* Tell adapter where to find initialization sections. */ 4959 if ((error = wpi_nic_lock(sc)) != 0) 4960 return error; 4961 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4962 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4963 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4964 dma->paddr + WPI_FW_DATA_MAXSZ); 4965 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4966 wpi_nic_unlock(sc); 4967 4968 /* Load firmware boot code. */ 4969 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4970 if (error != 0) { 4971 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4972 __func__); 4973 return error; 4974 } 4975 4976 /* Now press "execute". */ 4977 WPI_WRITE(sc, WPI_RESET, 0); 4978 4979 /* Wait at most one second for first alive notification. */ 4980 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4981 device_printf(sc->sc_dev, 4982 "%s: timeout waiting for adapter to initialize, error %d\n", 4983 __func__, error); 4984 return error; 4985 } 4986 4987 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4988 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4989 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4990 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4991 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4992 4993 /* Tell adapter where to find runtime sections. 
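These are the same BSM registers used for the init image above; the only difference is that the runtime text size is or'ed with WPI_FW_UPDATED.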
*/ 4994 if ((error = wpi_nic_lock(sc)) != 0) 4995 return error; 4996 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4997 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4998 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4999 dma->paddr + WPI_FW_DATA_MAXSZ); 5000 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 5001 WPI_FW_UPDATED | fw->main.textsz); 5002 wpi_nic_unlock(sc); 5003 5004 return 0; 5005 } 5006 5007 static int 5008 wpi_read_firmware(struct wpi_softc *sc) 5009 { 5010 const struct firmware *fp; 5011 struct wpi_fw_info *fw = &sc->fw; 5012 const struct wpi_firmware_hdr *hdr; 5013 int error; 5014 5015 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5016 5017 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5018 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5019 5020 WPI_UNLOCK(sc); 5021 fp = firmware_get(WPI_FW_NAME); 5022 WPI_LOCK(sc); 5023 5024 if (fp == NULL) { 5025 device_printf(sc->sc_dev, 5026 "could not load firmware image '%s'\n", WPI_FW_NAME); 5027 return EINVAL; 5028 } 5029 5030 sc->fw_fp = fp; 5031 5032 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5033 device_printf(sc->sc_dev, 5034 "firmware file too short: %zu bytes\n", fp->datasize); 5035 error = EINVAL; 5036 goto fail; 5037 } 5038 5039 fw->size = fp->datasize; 5040 fw->data = (const uint8_t *)fp->data; 5041 5042 /* Extract firmware header information. */ 5043 hdr = (const struct wpi_firmware_hdr *)fw->data; 5044 5045 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5046 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5047 5048 fw->main.textsz = le32toh(hdr->rtextsz); 5049 fw->main.datasz = le32toh(hdr->rdatasz); 5050 fw->init.textsz = le32toh(hdr->itextsz); 5051 fw->init.datasz = le32toh(hdr->idatasz); 5052 fw->boot.textsz = le32toh(hdr->btextsz); 5053 fw->boot.datasz = 0; 5054 5055 /* Sanity-check firmware header. */ 5056 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5057 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5058 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5059 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5060 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5061 (fw->boot.textsz & 3) != 0) { 5062 device_printf(sc->sc_dev, "invalid firmware header\n"); 5063 error = EINVAL; 5064 goto fail; 5065 } 5066 5067 /* Check that all firmware sections fit. */ 5068 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5069 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5070 device_printf(sc->sc_dev, 5071 "firmware file too short: %zu bytes\n", fw->size); 5072 error = EINVAL; 5073 goto fail; 5074 } 5075 5076 /* Get pointers to firmware sections. 
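The sections follow the header back to back, in the order shown in the layout comment above.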
*/ 5077 fw->main.text = (const uint8_t *)(hdr + 1); 5078 fw->main.data = fw->main.text + fw->main.textsz; 5079 fw->init.text = fw->main.data + fw->main.datasz; 5080 fw->init.data = fw->init.text + fw->init.textsz; 5081 fw->boot.text = fw->init.data + fw->init.datasz; 5082 5083 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5084 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5085 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5086 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5087 fw->main.textsz, fw->main.datasz, 5088 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5089 5090 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5091 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5092 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5093 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5094 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5095 5096 return 0; 5097 5098 fail: wpi_unload_firmware(sc); 5099 return error; 5100 } 5101 5102 /** 5103 * Free the referenced firmware image 5104 */ 5105 static void 5106 wpi_unload_firmware(struct wpi_softc *sc) 5107 { 5108 if (sc->fw_fp != NULL) { 5109 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5110 sc->fw_fp = NULL; 5111 } 5112 } 5113 5114 static int 5115 wpi_clock_wait(struct wpi_softc *sc) 5116 { 5117 int ntries; 5118 5119 /* Set "initialization complete" bit. */ 5120 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5121 5122 /* Wait for clock stabilization. */ 5123 for (ntries = 0; ntries < 2500; ntries++) { 5124 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5125 return 0; 5126 DELAY(100); 5127 } 5128 device_printf(sc->sc_dev, 5129 "%s: timeout waiting for clock stabilization\n", __func__); 5130 5131 return ETIMEDOUT; 5132 } 5133 5134 static int 5135 wpi_apm_init(struct wpi_softc *sc) 5136 { 5137 uint32_t reg; 5138 int error; 5139 5140 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5141 5142 /* Disable L0s exit timer (NMI bug workaround). */ 5143 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5144 /* Don't wait for ICH L0s (ICH bug workaround). */ 5145 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5146 5147 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5148 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5149 5150 /* Retrieve PCIe Active State Power Management (ASPM). */ 5151 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 5152 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5153 if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ 5154 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5155 else 5156 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5157 5158 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5159 5160 /* Wait for clock stabilization before accessing prph. */ 5161 if ((error = wpi_clock_wait(sc)) != 0) 5162 return error; 5163 5164 if ((error = wpi_nic_lock(sc)) != 0) 5165 return error; 5166 /* Cleanup. */ 5167 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5168 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5169 5170 /* Enable DMA and BSM (Bootstrap State Machine). */ 5171 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5172 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5173 DELAY(20); 5174 /* Disable L1-Active. 
*/ 5175 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5176 wpi_nic_unlock(sc); 5177 5178 return 0; 5179 } 5180 5181 static void 5182 wpi_apm_stop_master(struct wpi_softc *sc) 5183 { 5184 int ntries; 5185 5186 /* Stop busmaster DMA activity. */ 5187 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5188 5189 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5190 WPI_GP_CNTRL_MAC_PS) 5191 return; /* Already asleep. */ 5192 5193 for (ntries = 0; ntries < 100; ntries++) { 5194 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5195 return; 5196 DELAY(10); 5197 } 5198 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5199 __func__); 5200 } 5201 5202 static void 5203 wpi_apm_stop(struct wpi_softc *sc) 5204 { 5205 wpi_apm_stop_master(sc); 5206 5207 /* Reset the entire device. */ 5208 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5209 DELAY(10); 5210 /* Clear "initialization complete" bit. */ 5211 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5212 } 5213 5214 static void 5215 wpi_nic_config(struct wpi_softc *sc) 5216 { 5217 uint32_t rev; 5218 5219 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5220 5221 /* voodoo from the Linux "driver".. */ 5222 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5223 if ((rev & 0xc0) == 0x40) 5224 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5225 else if (!(rev & 0x80)) 5226 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5227 5228 if (sc->cap == 0x80) 5229 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5230 5231 if ((sc->rev & 0xf0) == 0xd0) 5232 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5233 else 5234 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5235 5236 if (sc->type > 1) 5237 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5238 } 5239 5240 static int 5241 wpi_hw_init(struct wpi_softc *sc) 5242 { 5243 uint8_t chnl; 5244 int ntries, error; 5245 5246 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5247 5248 /* Clear pending interrupts. */ 5249 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5250 5251 if ((error = wpi_apm_init(sc)) != 0) { 5252 device_printf(sc->sc_dev, 5253 "%s: could not power ON adapter, error %d\n", __func__, 5254 error); 5255 return error; 5256 } 5257 5258 /* Select VMAIN power source. */ 5259 if ((error = wpi_nic_lock(sc)) != 0) 5260 return error; 5261 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5262 wpi_nic_unlock(sc); 5263 /* Spin until VMAIN gets selected. */ 5264 for (ntries = 0; ntries < 5000; ntries++) { 5265 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5266 break; 5267 DELAY(10); 5268 } 5269 if (ntries == 5000) { 5270 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5271 return ETIMEDOUT; 5272 } 5273 5274 /* Perform adapter initialization. */ 5275 wpi_nic_config(sc); 5276 5277 /* Initialize RX ring. */ 5278 if ((error = wpi_nic_lock(sc)) != 0) 5279 return error; 5280 /* Set physical address of RX ring. */ 5281 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5282 /* Set physical address of RX read pointer. */ 5283 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5284 offsetof(struct wpi_shared, next)); 5285 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5286 /* Enable RX. 
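Enable the RX DMA engine with RBD fetching and status write-back, set the number of RBDs from WPI_RX_RING_COUNT_LOG and route the completion interrupt to the host with a minimal coalescing timeout.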
*/ 5287 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5288 WPI_FH_RX_CONFIG_DMA_ENA | 5289 WPI_FH_RX_CONFIG_RDRBD_ENA | 5290 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5291 WPI_FH_RX_CONFIG_MAXFRAG | 5292 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5293 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5294 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5295 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5296 wpi_nic_unlock(sc); 5297 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5298 5299 /* Initialize TX rings. */ 5300 if ((error = wpi_nic_lock(sc)) != 0) 5301 return error; 5302 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5303 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5304 /* Enable all 6 TX rings. */ 5305 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5306 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5307 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5308 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5309 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5310 /* Set physical address of TX rings. */ 5311 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5312 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5313 5314 /* Enable all DMA channels. */ 5315 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5316 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5317 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5318 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5319 } 5320 wpi_nic_unlock(sc); 5321 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5322 5323 /* Clear "radio off" and "commands blocked" bits. */ 5324 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5325 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5326 5327 /* Clear pending interrupts. */ 5328 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5329 /* Enable interrupts. */ 5330 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5331 5332 /* _Really_ make sure "radio off" bit is cleared! */ 5333 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5334 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5335 5336 if ((error = wpi_load_firmware(sc)) != 0) { 5337 device_printf(sc->sc_dev, 5338 "%s: could not load firmware, error %d\n", __func__, 5339 error); 5340 return error; 5341 } 5342 /* Wait at most one second for firmware alive notification. */ 5343 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5344 device_printf(sc->sc_dev, 5345 "%s: timeout waiting for adapter to initialize, error %d\n", 5346 __func__, error); 5347 return error; 5348 } 5349 5350 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5351 5352 /* Do post-firmware initialization. */ 5353 return wpi_post_alive(sc); 5354 } 5355 5356 static void 5357 wpi_hw_stop(struct wpi_softc *sc) 5358 { 5359 uint8_t chnl, qid; 5360 int ntries; 5361 5362 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5363 5364 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5365 wpi_nic_lock(sc); 5366 5367 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5368 5369 /* Disable interrupts. */ 5370 WPI_WRITE(sc, WPI_INT_MASK, 0); 5371 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5372 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5373 5374 /* Make sure we no longer hold the NIC lock. */ 5375 wpi_nic_unlock(sc); 5376 5377 if (wpi_nic_lock(sc) == 0) { 5378 /* Stop TX scheduler. */ 5379 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5380 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5381 5382 /* Stop all DMA channels. 
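Clear each channel's config and give it up to 200 x 10us (about 2ms) to report idle in WPI_FH_TX_STATUS.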
*/ 5383 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5384 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5385 for (ntries = 0; ntries < 200; ntries++) { 5386 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5387 WPI_FH_TX_STATUS_IDLE(chnl)) 5388 break; 5389 DELAY(10); 5390 } 5391 } 5392 wpi_nic_unlock(sc); 5393 } 5394 5395 /* Stop RX ring. */ 5396 wpi_reset_rx_ring(sc); 5397 5398 /* Reset all TX rings. */ 5399 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 5400 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5401 5402 if (wpi_nic_lock(sc) == 0) { 5403 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5404 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5405 wpi_nic_unlock(sc); 5406 } 5407 DELAY(5); 5408 /* Power OFF adapter. */ 5409 wpi_apm_stop(sc); 5410 } 5411 5412 static void 5413 wpi_radio_on(void *arg0, int pending) 5414 { 5415 struct wpi_softc *sc = arg0; 5416 struct ieee80211com *ic = &sc->sc_ic; 5417 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5418 5419 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5420 5421 WPI_LOCK(sc); 5422 callout_stop(&sc->watchdog_rfkill); 5423 WPI_UNLOCK(sc); 5424 5425 if (vap != NULL) 5426 ieee80211_init(vap); 5427 } 5428 5429 static void 5430 wpi_radio_off(void *arg0, int pending) 5431 { 5432 struct wpi_softc *sc = arg0; 5433 struct ieee80211com *ic = &sc->sc_ic; 5434 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5435 5436 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5437 5438 ieee80211_notify_radio(ic, 0); 5439 wpi_stop(sc); 5440 if (vap != NULL) 5441 ieee80211_stop(vap); 5442 5443 WPI_LOCK(sc); 5444 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5445 WPI_UNLOCK(sc); 5446 } 5447 5448 static int 5449 wpi_init(struct wpi_softc *sc) 5450 { 5451 int error = 0; 5452 5453 WPI_LOCK(sc); 5454 5455 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5456 5457 if (sc->sc_running != 0) 5458 goto end; 5459 5460 /* Check that the radio is not disabled by hardware switch. */ 5461 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5462 device_printf(sc->sc_dev, 5463 "RF switch: radio disabled (%s)\n", __func__); 5464 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5465 sc); 5466 error = EINPROGRESS; 5467 goto end; 5468 } 5469 5470 /* Read firmware images from the filesystem. */ 5471 if ((error = wpi_read_firmware(sc)) != 0) { 5472 device_printf(sc->sc_dev, 5473 "%s: could not read firmware, error %d\n", __func__, 5474 error); 5475 goto end; 5476 } 5477 5478 sc->sc_running = 1; 5479 5480 /* Initialize hardware and upload firmware. */ 5481 error = wpi_hw_init(sc); 5482 wpi_unload_firmware(sc); 5483 if (error != 0) { 5484 device_printf(sc->sc_dev, 5485 "%s: could not initialize hardware, error %d\n", __func__, 5486 error); 5487 goto fail; 5488 } 5489 5490 /* Configure adapter now that it is ready. 
*/ 5491 if ((error = wpi_config(sc)) != 0) { 5492 device_printf(sc->sc_dev, 5493 "%s: could not configure device, error %d\n", __func__, 5494 error); 5495 goto fail; 5496 } 5497 5498 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5499 5500 WPI_UNLOCK(sc); 5501 5502 return 0; 5503 5504 fail: wpi_stop_locked(sc); 5505 5506 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5507 WPI_UNLOCK(sc); 5508 5509 return error; 5510 } 5511 5512 static void 5513 wpi_stop_locked(struct wpi_softc *sc) 5514 { 5515 5516 WPI_LOCK_ASSERT(sc); 5517 5518 if (sc->sc_running == 0) 5519 return; 5520 5521 WPI_TX_LOCK(sc); 5522 WPI_TXQ_LOCK(sc); 5523 sc->sc_running = 0; 5524 WPI_TXQ_UNLOCK(sc); 5525 WPI_TX_UNLOCK(sc); 5526 5527 WPI_TXQ_STATE_LOCK(sc); 5528 callout_stop(&sc->tx_timeout); 5529 WPI_TXQ_STATE_UNLOCK(sc); 5530 5531 WPI_RXON_LOCK(sc); 5532 callout_stop(&sc->scan_timeout); 5533 callout_stop(&sc->calib_to); 5534 WPI_RXON_UNLOCK(sc); 5535 5536 /* Power OFF hardware. */ 5537 wpi_hw_stop(sc); 5538 } 5539 5540 static void 5541 wpi_stop(struct wpi_softc *sc) 5542 { 5543 WPI_LOCK(sc); 5544 wpi_stop_locked(sc); 5545 WPI_UNLOCK(sc); 5546 } 5547 5548 /* 5549 * Callback from net80211 to start a scan. 5550 */ 5551 static void 5552 wpi_scan_start(struct ieee80211com *ic) 5553 { 5554 struct wpi_softc *sc = ic->ic_softc; 5555 5556 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5557 } 5558 5559 /* 5560 * Callback from net80211 to terminate a scan. 5561 */ 5562 static void 5563 wpi_scan_end(struct ieee80211com *ic) 5564 { 5565 struct wpi_softc *sc = ic->ic_softc; 5566 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5567 5568 if (vap->iv_state == IEEE80211_S_RUN) 5569 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5570 } 5571 5572 /** 5573 * Called by the net80211 framework to indicate to the driver 5574 * that the channel should be changed. 5575 */ 5576 static void 5577 wpi_set_channel(struct ieee80211com *ic) 5578 { 5579 const struct ieee80211_channel *c = ic->ic_curchan; 5580 struct wpi_softc *sc = ic->ic_softc; 5581 int error; 5582 5583 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5584 5585 WPI_LOCK(sc); 5586 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5587 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5588 WPI_UNLOCK(sc); 5589 WPI_TX_LOCK(sc); 5590 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5591 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5592 WPI_TX_UNLOCK(sc); 5593 5594 /* 5595 * Only need to set the channel in Monitor mode. AP scanning and auth 5596 * are already taken care of by their respective firmware commands. 5597 */ 5598 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5599 WPI_RXON_LOCK(sc); 5600 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5601 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5602 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5603 WPI_RXON_24GHZ); 5604 } else { 5605 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5606 WPI_RXON_24GHZ); 5607 } 5608 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5609 device_printf(sc->sc_dev, 5610 "%s: error %d setting channel\n", __func__, 5611 error); 5612 WPI_RXON_UNLOCK(sc); 5613 } 5614 } 5615 5616 /** 5617 * Called by net80211 to indicate that we need to scan the current 5618 * channel. The channel has previously been set via the wpi_set_channel 5619 * callback.
5620 */ 5621 static void 5622 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5623 { 5624 struct ieee80211vap *vap = ss->ss_vap; 5625 struct ieee80211com *ic = vap->iv_ic; 5626 struct wpi_softc *sc = ic->ic_softc; 5627 int error; 5628 5629 WPI_RXON_LOCK(sc); 5630 error = wpi_scan(sc, ic->ic_curchan); 5631 WPI_RXON_UNLOCK(sc); 5632 if (error != 0) 5633 ieee80211_cancel_scan(vap); 5634 } 5635 5636 /** 5637 * Called by the net80211 framework to indicate 5638 * the minimum dwell time has been met, terminate the scan. 5639 * We don't actually terminate the scan as the firmware will notify 5640 * us when it's finished and we have no way to interrupt it. 5641 */ 5642 static void 5643 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5644 { 5645 /* NB: don't try to abort scan; wait for firmware to finish */ 5646 } 5647