/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 * Copyright (c) 2015 Andriy Voskoboinyk <avos@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware access as
 * many other adapters do. Instead, at run time the EEPROM is set into a
 * known state and told to load boot firmware. The boot firmware loads an
 * init and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and the hardware communicate
 * through circular DMA rings in SRAM shared with the firmware.
 *
 * There are 6 memory rings: 1 command ring, 1 RX data ring and 4 TX data
 * rings. The 4 TX data rings allow for QoS prioritization.
 *
 * The RX data ring consists of 32 DMA buffers. Two registers are used to
 * indicate where in the ring the driver and the firmware are up to. The
 * driver sets the initial read index (reg1) and the initial write index
 * (reg2); the firmware updates the read index (reg1) on receipt of a packet
 * and fires an interrupt. The driver then processes the buffers starting at
 * reg1, indicating to the firmware which buffers have been accessed by
 * updating reg2, while at the same time allocating new memory for the
 * processed buffers.
 *
 * A similar thing happens with the TX rings. The difference is that the
 * firmware stops processing buffers once the queue is full and resumes only
 * after confirmation of a successful transmission (tx_done) has occurred.
 *
 * The command ring operates in the same manner as the TX queues.
 *
 * All communication directly with the card (i.e. the EEPROM) is classed as
 * Stage 1 communication.
 *
 * All communication via the firmware to the card is classed as Stage 2.
 * The firmware consists of 2 parts: a bootstrap firmware and a runtime
 * firmware. The bootstrap firmware and runtime firmware are loaded
 * from host memory via DMA to the card and then told to execute. From this
 * point on the majority of communication between the driver and the card
 * goes via the firmware.
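 *
 * As a simplified illustration of the RX handshake (not the literal code;
 * see wpi_rx_done() and wpi_update_rx_ring() below), after handing a frame
 * up to net80211 and reloading a fresh mbuf the driver advances its ring
 * index and publishes it to the firmware, roughly:
 *
 *	ring->cur = (ring->cur + 1) % WPI_RX_RING_COUNT;
 *	WPI_WRITE(sc, WPI_FH_RX_WPTR, ring->cur & ~7);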
 */

#include "opt_wlan.h"
#include "opt_wpi.h"

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
#include <dev/wpi/if_wpi_debug.h>

struct wpi_ident {
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subdevice;
	const char	*name;
};

static const struct wpi_ident wpi_ident_table[] = {
	/* The below entries support ABG regardless of the subid */
	{ 0x8086, 0x4222,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	{ 0x8086, 0x4227,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	/* The below entries only support BG */
	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
	{ 0, 0, 0, NULL }
};

static int	wpi_probe(device_t);
static int	wpi_attach(device_t);
static void	wpi_radiotap_attach(struct wpi_softc *);
static void	wpi_sysctlattach(struct wpi_softc *);
static void	wpi_init_beacon(struct wpi_vap *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	wpi_vap_delete(struct ieee80211vap *);
static int	wpi_detach(device_t);
static int	wpi_shutdown(device_t);
static int	wpi_suspend(device_t);
static int	wpi_resume(device_t);
static int	wpi_nic_lock(struct wpi_softc *);
static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	wpi_dma_contig_free(struct wpi_dma_info *);
static int	wpi_alloc_shared(struct wpi_softc *);
static void	wpi_free_shared(struct wpi_softc *);
static int	wpi_alloc_fwmem(struct wpi_softc *);
static void	wpi_free_fwmem(struct wpi_softc *);
static int	wpi_alloc_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring_ps(struct wpi_softc *);
static void	wpi_reset_rx_ring(struct wpi_softc *);
static void	wpi_free_rx_ring(struct wpi_softc *);
static int	wpi_alloc_tx_ring(struct wpi_softc *,
		    struct wpi_tx_ring *, uint8_t);
static void	wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void	wpi_update_tx_ring_ps(struct wpi_softc *,
		    struct wpi_tx_ring *);
static void	wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void	wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static int	wpi_read_eeprom(struct wpi_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static uint32_t	wpi_eeprom_channel_flags(struct wpi_eeprom_chan *);
static void	wpi_read_eeprom_band(struct wpi_softc *, uint8_t, int, int *,
		    struct ieee80211_channel[]);
static int	wpi_read_eeprom_channels(struct wpi_softc *, uint8_t);
static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *,
		    struct ieee80211_channel *);
static void	wpi_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static int	wpi_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel[]);
static int	wpi_read_eeprom_group(struct wpi_softc *, uint8_t);
static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	wpi_node_free(struct ieee80211_node *);
static void	wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
		    const struct ieee80211_rx_stats *,
		    int, int);
static void	wpi_restore_node(void *, struct ieee80211_node *);
static void	wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *);
static int	wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	wpi_calib_timeout(void *);
static void	wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *,
		    struct wpi_rx_data *);
static void	wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *,
		    struct wpi_rx_data *);
static void	wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *);
static void	wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *);
static void	wpi_notif_intr(struct wpi_softc *);
static void	wpi_wakeup_intr(struct wpi_softc *);
#ifdef WPI_DEBUG
static void	wpi_debug_registers(struct wpi_softc *);
#endif
static void	wpi_fatal_intr(struct wpi_softc *);
static void	wpi_intr(void *);
static void	wpi_free_txfrags(struct wpi_softc *, uint16_t);
static int	wpi_cmd2(struct wpi_softc *, struct wpi_buf *);
static int	wpi_tx_data(struct wpi_softc *, struct mbuf *,
		    struct ieee80211_node *);
static int	wpi_tx_data_raw(struct wpi_softc *, struct mbuf *,
		    struct ieee80211_node *,
		    const struct ieee80211_bpf_params *);
static int	wpi_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static int	wpi_transmit(struct ieee80211com *, struct mbuf *);
static void	wpi_watchdog_rfkill(void *);
static void	wpi_scan_timeout(void *);
static void	wpi_tx_timeout(void *);
static void	wpi_parent(struct ieee80211com *);
static int	wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t,
		    int);
static int	wpi_mrr_setup(struct wpi_softc *);
static int	wpi_add_node(struct wpi_softc *, struct ieee80211_node *);
static int	wpi_add_broadcast_node(struct wpi_softc *, int);
static int	wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *);
static void	wpi_del_node(struct wpi_softc *, struct ieee80211_node *);
static int	wpi_updateedca(struct ieee80211com *);
static void	wpi_set_promisc(struct wpi_softc *);
static void	wpi_update_promisc(struct ieee80211com *);
static void	wpi_update_mcast(struct ieee80211com *);
static void	wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t);
static int	wpi_set_timing(struct wpi_softc *, struct ieee80211_node *);
static void	wpi_power_calibration(struct wpi_softc *);
static int	wpi_set_txpower(struct wpi_softc *, int);
static int	wpi_get_power_index(struct wpi_softc *,
		    struct wpi_power_group *, uint8_t, int, int);
static int	wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int);
static int	wpi_send_btcoex(struct wpi_softc *);
static int	wpi_send_rxon(struct wpi_softc *, int, int);
static int	wpi_config(struct wpi_softc *);
static uint16_t	wpi_get_active_dwell_time(struct wpi_softc *,
		    struct ieee80211_channel *, uint8_t);
static uint16_t	wpi_limit_dwell(struct wpi_softc *, uint16_t);
static uint16_t	wpi_get_passive_dwell_time(struct wpi_softc *,
		    struct ieee80211_channel *);
static uint32_t	wpi_get_scan_pause_time(uint32_t, uint16_t);
static int	wpi_scan(struct wpi_softc *, struct ieee80211_channel *);
static int	wpi_auth(struct wpi_softc *, struct ieee80211vap *);
static int	wpi_config_beacon(struct wpi_vap *);
static int	wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
static void	wpi_update_beacon(struct ieee80211vap *, int);
static void	wpi_newassoc(struct ieee80211_node *, int);
static int	wpi_run(struct wpi_softc *, struct ieee80211vap *);
static int	wpi_load_key(struct ieee80211_node *,
		    const struct ieee80211_key *);
static void	wpi_load_key_cb(void *, struct ieee80211_node *);
static int	wpi_set_global_keys(struct ieee80211_node *);
static int	wpi_del_key(struct ieee80211_node *,
		    const struct ieee80211_key *);
static void	wpi_del_key_cb(void *, struct ieee80211_node *);
static int	wpi_process_key(struct ieee80211vap *,
		    const struct ieee80211_key *, int);
static int	wpi_key_set(struct ieee80211vap *,
		    const struct ieee80211_key *);
static int	wpi_key_delete(struct ieee80211vap *,
		    const struct ieee80211_key *);
static int	wpi_post_alive(struct wpi_softc *);
static int	wpi_load_bootcode(struct wpi_softc *, const uint8_t *,
		    uint32_t);
static int	wpi_load_firmware(struct wpi_softc *);
static int	wpi_read_firmware(struct wpi_softc *);
static void	wpi_unload_firmware(struct wpi_softc *);
static int	wpi_clock_wait(struct wpi_softc *);
static int	wpi_apm_init(struct wpi_softc *);
static void	wpi_apm_stop_master(struct wpi_softc *);
static void	wpi_apm_stop(struct wpi_softc *);
static void	wpi_nic_config(struct wpi_softc *);
static int	wpi_hw_init(struct wpi_softc *);
static void	wpi_hw_stop(struct wpi_softc *);
static void	wpi_radio_on(void *, int);
static void	wpi_radio_off(void *, int);
static int	wpi_init(struct wpi_softc *);
static void	wpi_stop_locked(struct wpi_softc *);
static void	wpi_stop(struct wpi_softc *);
static void	wpi_scan_start(struct ieee80211com *);
static void	wpi_scan_end(struct ieee80211com *);
static void	wpi_set_channel(struct ieee80211com *);
static void	wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	wpi_scan_mindwell(struct ieee80211_scan_state *);

static device_method_t wpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		wpi_probe),
	DEVMETHOD(device_attach,	wpi_attach),
	DEVMETHOD(device_detach,	wpi_detach),
	DEVMETHOD(device_shutdown,	wpi_shutdown),
	DEVMETHOD(device_suspend,	wpi_suspend),
	DEVMETHOD(device_resume,	wpi_resume),

	DEVMETHOD_END
};

static driver_t wpi_driver = {
	"wpi",
	wpi_methods,
	sizeof (struct wpi_softc)
};
static devclass_t wpi_devclass;

DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);

MODULE_VERSION(wpi, 1);

MODULE_DEPEND(wpi, pci, 1, 1, 1);
MODULE_DEPEND(wpi, wlan, 1, 1, 1);
MODULE_DEPEND(wpi, firmware, 1, 1, 1);

static int
wpi_probe(device_t dev)
{
	const struct wpi_ident *ident;

	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
		if (pci_get_vendor(dev) == ident->vendor &&
		    pci_get_device(dev) == ident->device) {
			device_set_desc(dev, ident->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return ENXIO;
}

static int
wpi_attach(device_t dev)
{
	struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
	struct ieee80211com *ic;
	uint8_t i;
	int error, rid;
#ifdef WPI_DEBUG
	int supportsa = 1;
	const struct wpi_ident *ident;
#endif

	sc->sc_dev = dev;

#ifdef WPI_DEBUG
	error = resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
	if (error != 0)
		sc->sc_debug = 0;
#else
	sc->sc_debug = 0;
#endif

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return error;
	}

	/*
	 * Some cards only support 802.11b/g, not a; check to see if this is
	 * one such card. A 0x0 in the subdevice table indicates that the
	 * entire subdevice range is to be ignored.
	 */
#ifdef WPI_DEBUG
	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
		if (ident->subdevice &&
		    pci_get_subdevice(dev) == ident->subdevice) {
			supportsa = 0;
			break;
		}
	}
#endif

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "can't map mem space\n");
		return ENOMEM;
	}
	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	rid = 1;
	if (pci_alloc_msi(dev, &rid) == 0)
		rid = 1;
	else
		rid = 0;
	/* Install interrupt handler. */
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		error = ENOMEM;
		goto fail;
	}

	WPI_LOCK_INIT(sc);
	WPI_TX_LOCK_INIT(sc);
	WPI_RXON_LOCK_INIT(sc);
	WPI_NT_LOCK_INIT(sc);
	WPI_TXQ_LOCK_INIT(sc);
	WPI_TXQ_STATE_LOCK_INIT(sc);

	/* Allocate DMA memory for firmware transfers. */
	if ((error = wpi_alloc_fwmem(sc)) != 0) {
		device_printf(dev,
		    "could not allocate memory for firmware, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate shared page.
*/ 426 if ((error = wpi_alloc_shared(sc)) != 0) { 427 device_printf(dev, "could not allocate shared page\n"); 428 goto fail; 429 } 430 431 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 432 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 433 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 434 device_printf(dev, 435 "could not allocate TX ring %d, error %d\n", i, 436 error); 437 goto fail; 438 } 439 } 440 441 /* Allocate RX ring. */ 442 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 443 device_printf(dev, "could not allocate RX ring, error %d\n", 444 error); 445 goto fail; 446 } 447 448 /* Clear pending interrupts. */ 449 WPI_WRITE(sc, WPI_INT, 0xffffffff); 450 451 ic = &sc->sc_ic; 452 ic->ic_softc = sc; 453 ic->ic_name = device_get_nameunit(dev); 454 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 455 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 456 457 /* Set device capabilities. */ 458 ic->ic_caps = 459 IEEE80211_C_STA /* station mode supported */ 460 | IEEE80211_C_IBSS /* IBSS mode supported */ 461 | IEEE80211_C_HOSTAP /* Host access point mode */ 462 | IEEE80211_C_MONITOR /* monitor mode supported */ 463 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 464 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 465 | IEEE80211_C_TXFRAG /* handle tx frags */ 466 | IEEE80211_C_TXPMGT /* tx power management */ 467 | IEEE80211_C_SHSLOT /* short slot time supported */ 468 | IEEE80211_C_WPA /* 802.11i */ 469 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 470 | IEEE80211_C_WME /* 802.11e */ 471 | IEEE80211_C_PMGT /* Station-side power mgmt */ 472 ; 473 474 ic->ic_cryptocaps = 475 IEEE80211_CRYPTO_AES_CCM; 476 477 /* 478 * Read in the eeprom and also setup the channels for 479 * net80211. We don't set the rates as net80211 does this for us 480 */ 481 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 482 device_printf(dev, "could not read EEPROM, error %d\n", 483 error); 484 goto fail; 485 } 486 487 #ifdef WPI_DEBUG 488 if (bootverbose) { 489 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 490 sc->domain); 491 device_printf(sc->sc_dev, "Hardware Type: %c\n", 492 sc->type > 1 ? 'B': '?'); 493 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 494 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 495 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 496 supportsa ? "does" : "does not"); 497 498 /* XXX hw_config uses the PCIDEV for the Hardware rev. Must 499 check what sc->rev really represents - benjsc 20070615 */ 500 } 501 #endif 502 503 ieee80211_ifattach(ic); 504 ic->ic_vap_create = wpi_vap_create; 505 ic->ic_vap_delete = wpi_vap_delete; 506 ic->ic_parent = wpi_parent; 507 ic->ic_raw_xmit = wpi_raw_xmit; 508 ic->ic_transmit = wpi_transmit; 509 ic->ic_node_alloc = wpi_node_alloc; 510 sc->sc_node_free = ic->ic_node_free; 511 ic->ic_node_free = wpi_node_free; 512 ic->ic_wme.wme_update = wpi_updateedca; 513 ic->ic_update_promisc = wpi_update_promisc; 514 ic->ic_update_mcast = wpi_update_mcast; 515 ic->ic_newassoc = wpi_newassoc; 516 ic->ic_scan_start = wpi_scan_start; 517 ic->ic_scan_end = wpi_scan_end; 518 ic->ic_set_channel = wpi_set_channel; 519 ic->ic_scan_curchan = wpi_scan_curchan; 520 ic->ic_scan_mindwell = wpi_scan_mindwell; 521 ic->ic_getradiocaps = wpi_getradiocaps; 522 ic->ic_setregdomain = wpi_setregdomain; 523 524 sc->sc_update_rx_ring = wpi_update_rx_ring; 525 sc->sc_update_tx_ring = wpi_update_tx_ring; 526 527 wpi_radiotap_attach(sc); 528 529 /* Setup Tx status flags (constant). 
*/ 530 sc->sc_txs.flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY | 531 IEEE80211_RATECTL_STATUS_LONG_RETRY; 532 533 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 534 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 535 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 536 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 537 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 538 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 539 540 wpi_sysctlattach(sc); 541 542 /* 543 * Hook our interrupt after all initialization is complete. 544 */ 545 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 546 NULL, wpi_intr, sc, &sc->sc_ih); 547 if (error != 0) { 548 device_printf(dev, "can't establish interrupt, error %d\n", 549 error); 550 goto fail; 551 } 552 553 if (bootverbose) 554 ieee80211_announce(ic); 555 556 #ifdef WPI_DEBUG 557 if (sc->sc_debug & WPI_DEBUG_HW) 558 ieee80211_announce_channels(ic); 559 #endif 560 561 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 562 return 0; 563 564 fail: wpi_detach(dev); 565 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 566 return error; 567 } 568 569 /* 570 * Attach the interface to 802.11 radiotap. 571 */ 572 static void 573 wpi_radiotap_attach(struct wpi_softc *sc) 574 { 575 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 576 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 577 578 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 579 ieee80211_radiotap_attach(&sc->sc_ic, 580 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 581 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 582 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 583 } 584 585 static void 586 wpi_sysctlattach(struct wpi_softc *sc) 587 { 588 #ifdef WPI_DEBUG 589 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 590 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 591 592 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 593 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 594 "control debugging printfs"); 595 #endif 596 } 597 598 static void 599 wpi_init_beacon(struct wpi_vap *wvp) 600 { 601 struct wpi_buf *bcn = &wvp->wv_bcbuf; 602 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 603 604 cmd->id = WPI_ID_BROADCAST; 605 cmd->ofdm_mask = 0xff; 606 cmd->cck_mask = 0x0f; 607 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 608 609 /* 610 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 611 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 612 */ 613 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 614 615 bcn->code = WPI_CMD_SET_BEACON; 616 bcn->ac = WPI_CMD_QUEUE_NUM; 617 bcn->size = sizeof(struct wpi_cmd_beacon); 618 } 619 620 static struct ieee80211vap * 621 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 622 enum ieee80211_opmode opmode, int flags, 623 const uint8_t bssid[IEEE80211_ADDR_LEN], 624 const uint8_t mac[IEEE80211_ADDR_LEN]) 625 { 626 struct wpi_vap *wvp; 627 struct ieee80211vap *vap; 628 629 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 630 return NULL; 631 632 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 633 vap = &wvp->wv_vap; 634 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 635 636 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 637 WPI_VAP_LOCK_INIT(wvp); 638 wpi_init_beacon(wvp); 639 } 640 641 /* Override with driver methods. 
*/ 642 vap->iv_key_set = wpi_key_set; 643 vap->iv_key_delete = wpi_key_delete; 644 if (opmode == IEEE80211_M_IBSS) { 645 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 646 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; 647 } 648 wvp->wv_newstate = vap->iv_newstate; 649 vap->iv_newstate = wpi_newstate; 650 vap->iv_update_beacon = wpi_update_beacon; 651 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 652 653 ieee80211_ratectl_init(vap); 654 /* Complete setup. */ 655 ieee80211_vap_attach(vap, ieee80211_media_change, 656 ieee80211_media_status, mac); 657 ic->ic_opmode = opmode; 658 return vap; 659 } 660 661 static void 662 wpi_vap_delete(struct ieee80211vap *vap) 663 { 664 struct wpi_vap *wvp = WPI_VAP(vap); 665 struct wpi_buf *bcn = &wvp->wv_bcbuf; 666 enum ieee80211_opmode opmode = vap->iv_opmode; 667 668 ieee80211_ratectl_deinit(vap); 669 ieee80211_vap_detach(vap); 670 671 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 672 if (bcn->m != NULL) 673 m_freem(bcn->m); 674 675 WPI_VAP_LOCK_DESTROY(wvp); 676 } 677 678 free(wvp, M_80211_VAP); 679 } 680 681 static int 682 wpi_detach(device_t dev) 683 { 684 struct wpi_softc *sc = device_get_softc(dev); 685 struct ieee80211com *ic = &sc->sc_ic; 686 uint8_t qid; 687 688 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 689 690 if (ic->ic_vap_create == wpi_vap_create) { 691 ieee80211_draintask(ic, &sc->sc_radioon_task); 692 ieee80211_draintask(ic, &sc->sc_radiooff_task); 693 694 wpi_stop(sc); 695 696 callout_drain(&sc->watchdog_rfkill); 697 callout_drain(&sc->tx_timeout); 698 callout_drain(&sc->scan_timeout); 699 callout_drain(&sc->calib_to); 700 ieee80211_ifdetach(ic); 701 } 702 703 /* Uninstall interrupt handler. */ 704 if (sc->irq != NULL) { 705 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 706 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 707 sc->irq); 708 pci_release_msi(dev); 709 } 710 711 if (sc->txq[0].data_dmat) { 712 /* Free DMA resources. */ 713 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 714 wpi_free_tx_ring(sc, &sc->txq[qid]); 715 716 wpi_free_rx_ring(sc); 717 wpi_free_shared(sc); 718 } 719 720 if (sc->fw_dma.tag) 721 wpi_free_fwmem(sc); 722 723 if (sc->mem != NULL) 724 bus_release_resource(dev, SYS_RES_MEMORY, 725 rman_get_rid(sc->mem), sc->mem); 726 727 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 728 WPI_TXQ_STATE_LOCK_DESTROY(sc); 729 WPI_TXQ_LOCK_DESTROY(sc); 730 WPI_NT_LOCK_DESTROY(sc); 731 WPI_RXON_LOCK_DESTROY(sc); 732 WPI_TX_LOCK_DESTROY(sc); 733 WPI_LOCK_DESTROY(sc); 734 return 0; 735 } 736 737 static int 738 wpi_shutdown(device_t dev) 739 { 740 struct wpi_softc *sc = device_get_softc(dev); 741 742 wpi_stop(sc); 743 return 0; 744 } 745 746 static int 747 wpi_suspend(device_t dev) 748 { 749 struct wpi_softc *sc = device_get_softc(dev); 750 struct ieee80211com *ic = &sc->sc_ic; 751 752 ieee80211_suspend_all(ic); 753 return 0; 754 } 755 756 static int 757 wpi_resume(device_t dev) 758 { 759 struct wpi_softc *sc = device_get_softc(dev); 760 struct ieee80211com *ic = &sc->sc_ic; 761 762 /* Clear device-specific "PCI retry timeout" register (41h). */ 763 pci_write_config(dev, 0x41, 0, 1); 764 765 ieee80211_resume_all(ic); 766 return 0; 767 } 768 769 /* 770 * Grab exclusive access to NIC memory. 771 */ 772 static int 773 wpi_nic_lock(struct wpi_softc *sc) 774 { 775 int ntries; 776 777 /* Request exclusive access to NIC. */ 778 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 779 780 /* Spin until we actually get the lock. 
*/ 781 for (ntries = 0; ntries < 1000; ntries++) { 782 if ((WPI_READ(sc, WPI_GP_CNTRL) & 783 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 784 WPI_GP_CNTRL_MAC_ACCESS_ENA) 785 return 0; 786 DELAY(10); 787 } 788 789 device_printf(sc->sc_dev, "could not lock memory\n"); 790 791 return ETIMEDOUT; 792 } 793 794 /* 795 * Release lock on NIC memory. 796 */ 797 static __inline void 798 wpi_nic_unlock(struct wpi_softc *sc) 799 { 800 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 801 } 802 803 static __inline uint32_t 804 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 805 { 806 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 807 WPI_BARRIER_READ_WRITE(sc); 808 return WPI_READ(sc, WPI_PRPH_RDATA); 809 } 810 811 static __inline void 812 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 813 { 814 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 815 WPI_BARRIER_WRITE(sc); 816 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 817 } 818 819 static __inline void 820 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 821 { 822 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 823 } 824 825 static __inline void 826 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 827 { 828 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 829 } 830 831 static __inline void 832 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 833 const uint32_t *data, uint32_t count) 834 { 835 for (; count != 0; count--, data++, addr += 4) 836 wpi_prph_write(sc, addr, *data); 837 } 838 839 static __inline uint32_t 840 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 841 { 842 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 843 WPI_BARRIER_READ_WRITE(sc); 844 return WPI_READ(sc, WPI_MEM_RDATA); 845 } 846 847 static __inline void 848 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 849 int count) 850 { 851 for (; count > 0; count--, addr += 4) 852 *data++ = wpi_mem_read(sc, addr); 853 } 854 855 static int 856 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 857 { 858 uint8_t *out = data; 859 uint32_t val; 860 int error, ntries; 861 862 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 863 864 if ((error = wpi_nic_lock(sc)) != 0) 865 return error; 866 867 for (; count > 0; count -= 2, addr++) { 868 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 869 for (ntries = 0; ntries < 10; ntries++) { 870 val = WPI_READ(sc, WPI_EEPROM); 871 if (val & WPI_EEPROM_READ_VALID) 872 break; 873 DELAY(5); 874 } 875 if (ntries == 10) { 876 device_printf(sc->sc_dev, 877 "timeout reading ROM at 0x%x\n", addr); 878 return ETIMEDOUT; 879 } 880 *out++= val >> 16; 881 if (count > 1) 882 *out ++= val >> 24; 883 } 884 885 wpi_nic_unlock(sc); 886 887 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 888 889 return 0; 890 } 891 892 static void 893 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 894 { 895 if (error != 0) 896 return; 897 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 898 *(bus_addr_t *)arg = segs[0].ds_addr; 899 } 900 901 /* 902 * Allocates a contiguous block of dma memory of the requested size and 903 * alignment. 
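 *
 * The sequence below follows the standard bus_dma(9) pattern (a descriptive
 * note only): create a tag with bus_dma_tag_create(), allocate the memory
 * with bus_dmamem_alloc(), then bus_dmamap_load() it, using
 * wpi_dma_map_addr() as the callback that records the single segment's
 * physical address in dma->paddr.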
904 */ 905 static int 906 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 907 void **kvap, bus_size_t size, bus_size_t alignment) 908 { 909 int error; 910 911 dma->tag = NULL; 912 dma->size = size; 913 914 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 915 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 916 1, size, 0, NULL, NULL, &dma->tag); 917 if (error != 0) 918 goto fail; 919 920 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 921 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 922 if (error != 0) 923 goto fail; 924 925 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 926 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 927 if (error != 0) 928 goto fail; 929 930 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 931 932 if (kvap != NULL) 933 *kvap = dma->vaddr; 934 935 return 0; 936 937 fail: wpi_dma_contig_free(dma); 938 return error; 939 } 940 941 static void 942 wpi_dma_contig_free(struct wpi_dma_info *dma) 943 { 944 if (dma->vaddr != NULL) { 945 bus_dmamap_sync(dma->tag, dma->map, 946 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 947 bus_dmamap_unload(dma->tag, dma->map); 948 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 949 dma->vaddr = NULL; 950 } 951 if (dma->tag != NULL) { 952 bus_dma_tag_destroy(dma->tag); 953 dma->tag = NULL; 954 } 955 } 956 957 /* 958 * Allocate a shared page between host and NIC. 959 */ 960 static int 961 wpi_alloc_shared(struct wpi_softc *sc) 962 { 963 /* Shared buffer must be aligned on a 4KB boundary. */ 964 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 965 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 966 } 967 968 static void 969 wpi_free_shared(struct wpi_softc *sc) 970 { 971 wpi_dma_contig_free(&sc->shared_dma); 972 } 973 974 /* 975 * Allocate DMA-safe memory for firmware transfer. 976 */ 977 static int 978 wpi_alloc_fwmem(struct wpi_softc *sc) 979 { 980 /* Must be aligned on a 16-byte boundary. */ 981 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 982 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 983 } 984 985 static void 986 wpi_free_fwmem(struct wpi_softc *sc) 987 { 988 wpi_dma_contig_free(&sc->fw_dma); 989 } 990 991 static int 992 wpi_alloc_rx_ring(struct wpi_softc *sc) 993 { 994 struct wpi_rx_ring *ring = &sc->rxq; 995 bus_size_t size; 996 int i, error; 997 998 ring->cur = 0; 999 ring->update = 0; 1000 1001 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1002 1003 /* Allocate RX descriptors (16KB aligned.) */ 1004 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1005 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1006 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1007 if (error != 0) { 1008 device_printf(sc->sc_dev, 1009 "%s: could not allocate RX ring DMA memory, error %d\n", 1010 __func__, error); 1011 goto fail; 1012 } 1013 1014 /* Create RX buffer DMA tag. */ 1015 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1016 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1017 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat); 1018 if (error != 0) { 1019 device_printf(sc->sc_dev, 1020 "%s: could not create RX buf DMA tag, error %d\n", 1021 __func__, error); 1022 goto fail; 1023 } 1024 1025 /* 1026 * Allocate and map RX buffers. 
1027 */ 1028 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1029 struct wpi_rx_data *data = &ring->data[i]; 1030 bus_addr_t paddr; 1031 1032 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1033 if (error != 0) { 1034 device_printf(sc->sc_dev, 1035 "%s: could not create RX buf DMA map, error %d\n", 1036 __func__, error); 1037 goto fail; 1038 } 1039 1040 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1041 if (data->m == NULL) { 1042 device_printf(sc->sc_dev, 1043 "%s: could not allocate RX mbuf\n", __func__); 1044 error = ENOBUFS; 1045 goto fail; 1046 } 1047 1048 error = bus_dmamap_load(ring->data_dmat, data->map, 1049 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1050 &paddr, BUS_DMA_NOWAIT); 1051 if (error != 0 && error != EFBIG) { 1052 device_printf(sc->sc_dev, 1053 "%s: can't map mbuf (error %d)\n", __func__, 1054 error); 1055 goto fail; 1056 } 1057 1058 /* Set physical address of RX buffer. */ 1059 ring->desc[i] = htole32(paddr); 1060 } 1061 1062 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1063 BUS_DMASYNC_PREWRITE); 1064 1065 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1066 1067 return 0; 1068 1069 fail: wpi_free_rx_ring(sc); 1070 1071 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1072 1073 return error; 1074 } 1075 1076 static void 1077 wpi_update_rx_ring(struct wpi_softc *sc) 1078 { 1079 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1080 } 1081 1082 static void 1083 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1084 { 1085 struct wpi_rx_ring *ring = &sc->rxq; 1086 1087 if (ring->update != 0) { 1088 /* Wait for INT_WAKEUP event. */ 1089 return; 1090 } 1091 1092 WPI_TXQ_LOCK(sc); 1093 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1094 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1095 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1096 __func__); 1097 ring->update = 1; 1098 } else { 1099 wpi_update_rx_ring(sc); 1100 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1101 } 1102 WPI_TXQ_UNLOCK(sc); 1103 } 1104 1105 static void 1106 wpi_reset_rx_ring(struct wpi_softc *sc) 1107 { 1108 struct wpi_rx_ring *ring = &sc->rxq; 1109 int ntries; 1110 1111 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1112 1113 if (wpi_nic_lock(sc) == 0) { 1114 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1115 for (ntries = 0; ntries < 1000; ntries++) { 1116 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1117 WPI_FH_RX_STATUS_IDLE) 1118 break; 1119 DELAY(10); 1120 } 1121 wpi_nic_unlock(sc); 1122 } 1123 1124 ring->cur = 0; 1125 ring->update = 0; 1126 } 1127 1128 static void 1129 wpi_free_rx_ring(struct wpi_softc *sc) 1130 { 1131 struct wpi_rx_ring *ring = &sc->rxq; 1132 int i; 1133 1134 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1135 1136 wpi_dma_contig_free(&ring->desc_dma); 1137 1138 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1139 struct wpi_rx_data *data = &ring->data[i]; 1140 1141 if (data->m != NULL) { 1142 bus_dmamap_sync(ring->data_dmat, data->map, 1143 BUS_DMASYNC_POSTREAD); 1144 bus_dmamap_unload(ring->data_dmat, data->map); 1145 m_freem(data->m); 1146 data->m = NULL; 1147 } 1148 if (data->map != NULL) 1149 bus_dmamap_destroy(ring->data_dmat, data->map); 1150 } 1151 if (ring->data_dmat != NULL) { 1152 bus_dma_tag_destroy(ring->data_dmat); 1153 ring->data_dmat = NULL; 1154 } 1155 } 1156 1157 static int 1158 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) 1159 { 1160 bus_addr_t paddr; 1161 bus_size_t size; 1162 int i, error; 1163 1164 ring->qid = qid; 1165 
ring->queued = 0; 1166 ring->cur = 0; 1167 ring->pending = 0; 1168 ring->update = 0; 1169 1170 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1171 1172 /* Allocate TX descriptors (16KB aligned.) */ 1173 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1174 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1175 size, WPI_RING_DMA_ALIGN); 1176 if (error != 0) { 1177 device_printf(sc->sc_dev, 1178 "%s: could not allocate TX ring DMA memory, error %d\n", 1179 __func__, error); 1180 goto fail; 1181 } 1182 1183 /* Update shared area with ring physical address. */ 1184 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1185 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1186 BUS_DMASYNC_PREWRITE); 1187 1188 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1189 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1190 size, 4); 1191 if (error != 0) { 1192 device_printf(sc->sc_dev, 1193 "%s: could not allocate TX cmd DMA memory, error %d\n", 1194 __func__, error); 1195 goto fail; 1196 } 1197 1198 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1199 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1200 WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); 1201 if (error != 0) { 1202 device_printf(sc->sc_dev, 1203 "%s: could not create TX buf DMA tag, error %d\n", 1204 __func__, error); 1205 goto fail; 1206 } 1207 1208 paddr = ring->cmd_dma.paddr; 1209 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1210 struct wpi_tx_data *data = &ring->data[i]; 1211 1212 data->cmd_paddr = paddr; 1213 paddr += sizeof (struct wpi_tx_cmd); 1214 1215 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1216 if (error != 0) { 1217 device_printf(sc->sc_dev, 1218 "%s: could not create TX buf DMA map, error %d\n", 1219 __func__, error); 1220 goto fail; 1221 } 1222 } 1223 1224 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1225 1226 return 0; 1227 1228 fail: wpi_free_tx_ring(sc, ring); 1229 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1230 return error; 1231 } 1232 1233 static void 1234 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1235 { 1236 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1237 } 1238 1239 static void 1240 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1241 { 1242 1243 if (ring->update != 0) { 1244 /* Wait for INT_WAKEUP event. */ 1245 return; 1246 } 1247 1248 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1249 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1250 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1251 __func__, ring->qid); 1252 ring->update = 1; 1253 } else { 1254 wpi_update_tx_ring(sc, ring); 1255 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1256 } 1257 } 1258 1259 static void 1260 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1261 { 1262 int i; 1263 1264 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1265 1266 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1267 struct wpi_tx_data *data = &ring->data[i]; 1268 1269 if (data->m != NULL) { 1270 bus_dmamap_sync(ring->data_dmat, data->map, 1271 BUS_DMASYNC_POSTWRITE); 1272 bus_dmamap_unload(ring->data_dmat, data->map); 1273 m_freem(data->m); 1274 data->m = NULL; 1275 } 1276 if (data->ni != NULL) { 1277 ieee80211_free_node(data->ni); 1278 data->ni = NULL; 1279 } 1280 } 1281 /* Clear TX descriptors. 
*/ 1282 memset(ring->desc, 0, ring->desc_dma.size); 1283 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1284 BUS_DMASYNC_PREWRITE); 1285 ring->queued = 0; 1286 ring->cur = 0; 1287 ring->pending = 0; 1288 ring->update = 0; 1289 } 1290 1291 static void 1292 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1293 { 1294 int i; 1295 1296 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1297 1298 wpi_dma_contig_free(&ring->desc_dma); 1299 wpi_dma_contig_free(&ring->cmd_dma); 1300 1301 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1302 struct wpi_tx_data *data = &ring->data[i]; 1303 1304 if (data->m != NULL) { 1305 bus_dmamap_sync(ring->data_dmat, data->map, 1306 BUS_DMASYNC_POSTWRITE); 1307 bus_dmamap_unload(ring->data_dmat, data->map); 1308 m_freem(data->m); 1309 } 1310 if (data->map != NULL) 1311 bus_dmamap_destroy(ring->data_dmat, data->map); 1312 } 1313 if (ring->data_dmat != NULL) { 1314 bus_dma_tag_destroy(ring->data_dmat); 1315 ring->data_dmat = NULL; 1316 } 1317 } 1318 1319 /* 1320 * Extract various information from EEPROM. 1321 */ 1322 static int 1323 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1324 { 1325 #define WPI_CHK(res) do { \ 1326 if ((error = res) != 0) \ 1327 goto fail; \ 1328 } while (0) 1329 uint8_t i; 1330 int error; 1331 1332 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1333 1334 /* Adapter has to be powered on for EEPROM access to work. */ 1335 if ((error = wpi_apm_init(sc)) != 0) { 1336 device_printf(sc->sc_dev, 1337 "%s: could not power ON adapter, error %d\n", __func__, 1338 error); 1339 return error; 1340 } 1341 1342 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1343 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1344 error = EIO; 1345 goto fail; 1346 } 1347 /* Clear HW ownership of EEPROM. */ 1348 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1349 1350 /* Read the hardware capabilities, revision and SKU type. */ 1351 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1352 sizeof(sc->cap))); 1353 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1354 sizeof(sc->rev))); 1355 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1356 sizeof(sc->type))); 1357 1358 sc->rev = le16toh(sc->rev); 1359 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1360 sc->rev, sc->type); 1361 1362 /* Read the regulatory domain (4 ASCII characters.) */ 1363 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1364 sizeof(sc->domain))); 1365 1366 /* Read MAC address. */ 1367 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1368 IEEE80211_ADDR_LEN)); 1369 1370 /* Read the list of authorized channels. */ 1371 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1372 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1373 1374 /* Read the list of TX power groups. */ 1375 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1376 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1377 1378 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1379 1380 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1381 __func__); 1382 1383 return error; 1384 #undef WPI_CHK 1385 } 1386 1387 /* 1388 * Translate EEPROM flags to net80211. 
1389 */ 1390 static uint32_t 1391 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1392 { 1393 uint32_t nflags; 1394 1395 nflags = 0; 1396 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1397 nflags |= IEEE80211_CHAN_PASSIVE; 1398 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1399 nflags |= IEEE80211_CHAN_NOADHOC; 1400 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1401 nflags |= IEEE80211_CHAN_DFS; 1402 /* XXX apparently IBSS may still be marked */ 1403 nflags |= IEEE80211_CHAN_NOADHOC; 1404 } 1405 1406 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1407 if (nflags & IEEE80211_CHAN_NOADHOC) 1408 nflags |= IEEE80211_CHAN_NOHOSTAP; 1409 1410 return nflags; 1411 } 1412 1413 static void 1414 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n, int maxchans, 1415 int *nchans, struct ieee80211_channel chans[]) 1416 { 1417 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1418 const struct wpi_chan_band *band = &wpi_bands[n]; 1419 uint32_t nflags; 1420 uint8_t bands[IEEE80211_MODE_BYTES]; 1421 uint8_t chan, i; 1422 int error; 1423 1424 memset(bands, 0, sizeof(bands)); 1425 1426 if (n == 0) { 1427 setbit(bands, IEEE80211_MODE_11B); 1428 setbit(bands, IEEE80211_MODE_11G); 1429 } else 1430 setbit(bands, IEEE80211_MODE_11A); 1431 1432 for (i = 0; i < band->nchan; i++) { 1433 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1434 DPRINTF(sc, WPI_DEBUG_EEPROM, 1435 "Channel Not Valid: %d, band %d\n", 1436 band->chan[i],n); 1437 continue; 1438 } 1439 1440 chan = band->chan[i]; 1441 nflags = wpi_eeprom_channel_flags(&channels[i]); 1442 error = ieee80211_add_channel(chans, maxchans, nchans, 1443 chan, 0, channels[i].maxpwr, nflags, bands); 1444 if (error != 0) 1445 break; 1446 1447 /* Save maximum allowed TX power for this channel. */ 1448 sc->maxpwr[chan] = channels[i].maxpwr; 1449 1450 DPRINTF(sc, WPI_DEBUG_EEPROM, 1451 "adding chan %d flags=0x%x maxpwr=%d, offset %d\n", 1452 chan, channels[i].flags, sc->maxpwr[chan], *nchans); 1453 } 1454 } 1455 1456 /** 1457 * Read the eeprom to find out what channels are valid for the given 1458 * band and update net80211 with what we find. 1459 */ 1460 static int 1461 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) 1462 { 1463 struct ieee80211com *ic = &sc->sc_ic; 1464 const struct wpi_chan_band *band = &wpi_bands[n]; 1465 int error; 1466 1467 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1468 1469 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1470 band->nchan * sizeof (struct wpi_eeprom_chan)); 1471 if (error != 0) { 1472 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1473 return error; 1474 } 1475 1476 wpi_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, 1477 ic->ic_channels); 1478 1479 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1480 1481 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1482 1483 return 0; 1484 } 1485 1486 static struct wpi_eeprom_chan * 1487 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1488 { 1489 int i, j; 1490 1491 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1492 for (i = 0; i < wpi_bands[j].nchan; i++) 1493 if (wpi_bands[j].chan[i] == c->ic_ieee && 1494 ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1) 1495 return &sc->eeprom_channels[j][i]; 1496 1497 return NULL; 1498 } 1499 1500 static void 1501 wpi_getradiocaps(struct ieee80211com *ic, 1502 int maxchans, int *nchans, struct ieee80211_channel chans[]) 1503 { 1504 struct wpi_softc *sc = ic->ic_softc; 1505 int i; 1506 1507 /* Parse the list of authorized channels. 
*/ 1508 for (i = 0; i < WPI_CHAN_BANDS_COUNT && *nchans < maxchans; i++) 1509 wpi_read_eeprom_band(sc, i, maxchans, nchans, chans); 1510 } 1511 1512 /* 1513 * Enforce flags read from EEPROM. 1514 */ 1515 static int 1516 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1517 int nchan, struct ieee80211_channel chans[]) 1518 { 1519 struct wpi_softc *sc = ic->ic_softc; 1520 int i; 1521 1522 for (i = 0; i < nchan; i++) { 1523 struct ieee80211_channel *c = &chans[i]; 1524 struct wpi_eeprom_chan *channel; 1525 1526 channel = wpi_find_eeprom_channel(sc, c); 1527 if (channel == NULL) { 1528 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1529 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1530 return EINVAL; 1531 } 1532 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1533 } 1534 1535 return 0; 1536 } 1537 1538 static int 1539 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) 1540 { 1541 struct wpi_power_group *group = &sc->groups[n]; 1542 struct wpi_eeprom_group rgroup; 1543 int i, error; 1544 1545 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1546 1547 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1548 &rgroup, sizeof rgroup)) != 0) { 1549 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1550 return error; 1551 } 1552 1553 /* Save TX power group information. */ 1554 group->chan = rgroup.chan; 1555 group->maxpwr = rgroup.maxpwr; 1556 /* Retrieve temperature at which the samples were taken. */ 1557 group->temp = (int16_t)le16toh(rgroup.temp); 1558 1559 DPRINTF(sc, WPI_DEBUG_EEPROM, 1560 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1561 group->maxpwr, group->temp); 1562 1563 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1564 group->samples[i].index = rgroup.samples[i].index; 1565 group->samples[i].power = rgroup.samples[i].power; 1566 1567 DPRINTF(sc, WPI_DEBUG_EEPROM, 1568 "\tsample %d: index=%d power=%d\n", i, 1569 group->samples[i].index, group->samples[i].power); 1570 } 1571 1572 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1573 1574 return 0; 1575 } 1576 1577 static __inline uint8_t 1578 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1579 { 1580 uint8_t newid = WPI_ID_IBSS_MIN; 1581 1582 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1583 if ((sc->nodesmsk & (1 << newid)) == 0) { 1584 sc->nodesmsk |= 1 << newid; 1585 return newid; 1586 } 1587 } 1588 1589 return WPI_ID_UNDEFINED; 1590 } 1591 1592 static __inline uint8_t 1593 wpi_add_node_entry_sta(struct wpi_softc *sc) 1594 { 1595 sc->nodesmsk |= 1 << WPI_ID_BSS; 1596 1597 return WPI_ID_BSS; 1598 } 1599 1600 static __inline int 1601 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1602 { 1603 if (id == WPI_ID_UNDEFINED) 1604 return 0; 1605 1606 return (sc->nodesmsk >> id) & 1; 1607 } 1608 1609 static __inline void 1610 wpi_clear_node_table(struct wpi_softc *sc) 1611 { 1612 sc->nodesmsk = 0; 1613 } 1614 1615 static __inline void 1616 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1617 { 1618 sc->nodesmsk &= ~(1 << id); 1619 } 1620 1621 static struct ieee80211_node * 1622 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1623 { 1624 struct wpi_node *wn; 1625 1626 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1627 M_NOWAIT | M_ZERO); 1628 1629 if (wn == NULL) 1630 return NULL; 1631 1632 wn->id = WPI_ID_UNDEFINED; 1633 1634 return &wn->ni; 1635 } 1636 1637 static void 1638 wpi_node_free(struct ieee80211_node *ni) 1639 { 1640 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1641 struct wpi_node *wn = 
WPI_NODE(ni); 1642 1643 if (wn->id != WPI_ID_UNDEFINED) { 1644 WPI_NT_LOCK(sc); 1645 if (wpi_check_node_entry(sc, wn->id)) { 1646 wpi_del_node_entry(sc, wn->id); 1647 wpi_del_node(sc, ni); 1648 } 1649 WPI_NT_UNLOCK(sc); 1650 } 1651 1652 sc->sc_node_free(ni); 1653 } 1654 1655 static __inline int 1656 wpi_check_bss_filter(struct wpi_softc *sc) 1657 { 1658 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1659 } 1660 1661 static void 1662 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1663 const struct ieee80211_rx_stats *rxs, 1664 int rssi, int nf) 1665 { 1666 struct ieee80211vap *vap = ni->ni_vap; 1667 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1668 struct wpi_vap *wvp = WPI_VAP(vap); 1669 uint64_t ni_tstamp, rx_tstamp; 1670 1671 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1672 1673 if (vap->iv_state == IEEE80211_S_RUN && 1674 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1675 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1676 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1677 rx_tstamp = le64toh(sc->rx_tstamp); 1678 1679 if (ni_tstamp >= rx_tstamp) { 1680 DPRINTF(sc, WPI_DEBUG_STATE, 1681 "ibss merge, tsf %ju tstamp %ju\n", 1682 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1683 (void) ieee80211_ibss_merge(ni); 1684 } 1685 } 1686 } 1687 1688 static void 1689 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1690 { 1691 struct wpi_softc *sc = arg; 1692 struct wpi_node *wn = WPI_NODE(ni); 1693 int error; 1694 1695 WPI_NT_LOCK(sc); 1696 if (wn->id != WPI_ID_UNDEFINED) { 1697 wn->id = WPI_ID_UNDEFINED; 1698 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1699 device_printf(sc->sc_dev, 1700 "%s: could not add IBSS node, error %d\n", 1701 __func__, error); 1702 } 1703 } 1704 WPI_NT_UNLOCK(sc); 1705 } 1706 1707 static void 1708 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1709 { 1710 struct ieee80211com *ic = &sc->sc_ic; 1711 1712 /* Set group keys once. 
 */
	WPI_NT_LOCK(sc);
	wvp->wv_gtk = 0;
	WPI_NT_UNLOCK(sc);

	ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc);
	ieee80211_crypto_reload_keys(ic);
}

/**
 * Called by net80211 whenever there is a change to the 802.11 state machine.
 */
static int
wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct wpi_vap *wvp = WPI_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct wpi_softc *sc = ic->ic_softc;
	int error = 0;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	WPI_TXQ_LOCK(sc);
	if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) {
		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
		WPI_TXQ_UNLOCK(sc);

		return ENXIO;
	}
	WPI_TXQ_UNLOCK(sc);

	DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);

	if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) {
		if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set power saving level\n",
			    __func__);
			return error;
		}

		wpi_set_led(sc, WPI_LED_LINK, 1, 0);
	}

	switch (nstate) {
	case IEEE80211_S_SCAN:
		WPI_RXON_LOCK(sc);
		if (wpi_check_bss_filter(sc) != 0) {
			sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
			if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: could not send RXON\n", __func__);
			}
		}
		WPI_RXON_UNLOCK(sc);
		break;

	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		/*
		 * NB: do not optimize the AUTH -> AUTH state transition -
		 * this will break powersave with a non-QoS AP!
		 */

		/*
		 * The node must be registered in the firmware before auth.
		 * Also the associd must be cleared on RUN -> ASSOC
		 * transitions.
		 */
		if ((error = wpi_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to AUTH state, error %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition:
		 * STA mode: Just restart the timers.
		 * IBSS mode: Process IBSS merge.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			if (vap->iv_opmode != IEEE80211_M_IBSS) {
				WPI_RXON_LOCK(sc);
				wpi_calib_timeout(sc);
				WPI_RXON_UNLOCK(sc);
				break;
			} else {
				/*
				 * Drop the BSS_FILTER bit
				 * (there is no other way to change the bssid).
				 */
				WPI_RXON_LOCK(sc);
				sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
				if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
					device_printf(sc->sc_dev,
					    "%s: could not send RXON\n",
					    __func__);
				}
				WPI_RXON_UNLOCK(sc);

				/* Restore everything that was lost. */
				wpi_restore_node_table(sc, wvp);

				/* XXX set conditionally? */
				wpi_updateedca(ic);
			}
		}

		/*
		 * !RUN -> RUN requires setting the association id
		 * which is done with a firmware cmd. We also defer
		 * starting the timers until that work is done.
1831 */ 1832 if ((error = wpi_run(sc, vap)) != 0) { 1833 device_printf(sc->sc_dev, 1834 "%s: could not move to RUN state\n", __func__); 1835 } 1836 break; 1837 1838 default: 1839 break; 1840 } 1841 if (error != 0) { 1842 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1843 return error; 1844 } 1845 1846 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1847 1848 return wvp->wv_newstate(vap, nstate, arg); 1849 } 1850 1851 static void 1852 wpi_calib_timeout(void *arg) 1853 { 1854 struct wpi_softc *sc = arg; 1855 1856 if (wpi_check_bss_filter(sc) == 0) 1857 return; 1858 1859 wpi_power_calibration(sc); 1860 1861 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1862 } 1863 1864 static __inline uint8_t 1865 rate2plcp(const uint8_t rate) 1866 { 1867 switch (rate) { 1868 case 12: return 0xd; 1869 case 18: return 0xf; 1870 case 24: return 0x5; 1871 case 36: return 0x7; 1872 case 48: return 0x9; 1873 case 72: return 0xb; 1874 case 96: return 0x1; 1875 case 108: return 0x3; 1876 case 2: return 10; 1877 case 4: return 20; 1878 case 11: return 55; 1879 case 22: return 110; 1880 default: return 0; 1881 } 1882 } 1883 1884 static __inline uint8_t 1885 plcp2rate(const uint8_t plcp) 1886 { 1887 switch (plcp) { 1888 case 0xd: return 12; 1889 case 0xf: return 18; 1890 case 0x5: return 24; 1891 case 0x7: return 36; 1892 case 0x9: return 48; 1893 case 0xb: return 72; 1894 case 0x1: return 96; 1895 case 0x3: return 108; 1896 case 10: return 2; 1897 case 20: return 4; 1898 case 55: return 11; 1899 case 110: return 22; 1900 default: return 0; 1901 } 1902 } 1903 1904 /* Quickly determine if a given rate is CCK or OFDM. */ 1905 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1906 1907 static void 1908 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1909 struct wpi_rx_data *data) 1910 { 1911 struct ieee80211com *ic = &sc->sc_ic; 1912 struct wpi_rx_ring *ring = &sc->rxq; 1913 struct wpi_rx_stat *stat; 1914 struct wpi_rx_head *head; 1915 struct wpi_rx_tail *tail; 1916 struct ieee80211_frame *wh; 1917 struct ieee80211_node *ni; 1918 struct mbuf *m, *m1; 1919 bus_addr_t paddr; 1920 uint32_t flags; 1921 uint16_t len; 1922 int error; 1923 1924 stat = (struct wpi_rx_stat *)(desc + 1); 1925 1926 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1927 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1928 goto fail1; 1929 } 1930 1931 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1932 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1933 len = le16toh(head->len); 1934 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1935 flags = le32toh(tail->flags); 1936 1937 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1938 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1939 le32toh(desc->len), len, (int8_t)stat->rssi, 1940 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1941 1942 /* Discard frames with a bad FCS early. */ 1943 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1944 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1945 __func__, flags); 1946 goto fail1; 1947 } 1948 /* Discard frames that are too short. 
*/ 1949 if (len < sizeof (struct ieee80211_frame_ack)) { 1950 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1951 __func__, len); 1952 goto fail1; 1953 } 1954 1955 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1956 if (__predict_false(m1 == NULL)) { 1957 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1958 __func__); 1959 goto fail1; 1960 } 1961 bus_dmamap_unload(ring->data_dmat, data->map); 1962 1963 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1964 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1965 if (__predict_false(error != 0 && error != EFBIG)) { 1966 device_printf(sc->sc_dev, 1967 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1968 m_freem(m1); 1969 1970 /* Try to reload the old mbuf. */ 1971 error = bus_dmamap_load(ring->data_dmat, data->map, 1972 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1973 &paddr, BUS_DMA_NOWAIT); 1974 if (error != 0 && error != EFBIG) { 1975 panic("%s: could not load old RX mbuf", __func__); 1976 } 1977 /* Physical address may have changed. */ 1978 ring->desc[ring->cur] = htole32(paddr); 1979 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1980 BUS_DMASYNC_PREWRITE); 1981 goto fail1; 1982 } 1983 1984 m = data->m; 1985 data->m = m1; 1986 /* Update RX descriptor. */ 1987 ring->desc[ring->cur] = htole32(paddr); 1988 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1989 BUS_DMASYNC_PREWRITE); 1990 1991 /* Finalize mbuf. */ 1992 m->m_data = (caddr_t)(head + 1); 1993 m->m_pkthdr.len = m->m_len = len; 1994 1995 /* Grab a reference to the source node. */ 1996 wh = mtod(m, struct ieee80211_frame *); 1997 1998 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 1999 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2000 /* Check whether decryption was successful or not. */ 2001 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2002 DPRINTF(sc, WPI_DEBUG_RECV, 2003 "CCMP decryption failed 0x%x\n", flags); 2004 goto fail2; 2005 } 2006 m->m_flags |= M_WEP; 2007 } 2008 2009 if (len >= sizeof(struct ieee80211_frame_min)) 2010 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2011 else 2012 ni = NULL; 2013 2014 sc->rx_tstamp = tail->tstamp; 2015 2016 if (ieee80211_radiotap_active(ic)) { 2017 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2018 2019 tap->wr_flags = 0; 2020 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2021 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2022 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2023 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2024 tap->wr_tsft = tail->tstamp; 2025 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2026 tap->wr_rate = plcp2rate(head->plcp); 2027 } 2028 2029 WPI_UNLOCK(sc); 2030 2031 /* Send the frame to the 802.11 layer. */ 2032 if (ni != NULL) { 2033 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2034 /* Node is no longer needed. 
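 * Release the reference obtained via ieee80211_find_rxnode() above.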
*/ 2035 ieee80211_free_node(ni); 2036 } else 2037 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2038 2039 WPI_LOCK(sc); 2040 2041 return; 2042 2043 fail2: m_freem(m); 2044 2045 fail1: counter_u64_add(ic->ic_ierrors, 1); 2046 } 2047 2048 static void 2049 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2050 struct wpi_rx_data *data) 2051 { 2052 /* Ignore */ 2053 } 2054 2055 static void 2056 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2057 { 2058 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; 2059 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2060 struct wpi_tx_data *data = &ring->data[desc->idx]; 2061 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2062 struct mbuf *m; 2063 struct ieee80211_node *ni; 2064 uint32_t status = le32toh(stat->status); 2065 2066 KASSERT(data->ni != NULL, ("no node")); 2067 KASSERT(data->m != NULL, ("no mbuf")); 2068 2069 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2070 2071 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2072 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2073 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2074 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2075 2076 /* Unmap and free mbuf. */ 2077 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2078 bus_dmamap_unload(ring->data_dmat, data->map); 2079 m = data->m, data->m = NULL; 2080 ni = data->ni, data->ni = NULL; 2081 2082 /* 2083 * Update rate control statistics for the node. 2084 */ 2085 txs->short_retries = stat->rtsfailcnt; 2086 txs->long_retries = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2087 if (!(status & WPI_TX_STATUS_FAIL)) 2088 txs->status = IEEE80211_RATECTL_TX_SUCCESS; 2089 else { 2090 switch (status & 0xff) { 2091 case WPI_TX_STATUS_FAIL_SHORT_LIMIT: 2092 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT; 2093 break; 2094 case WPI_TX_STATUS_FAIL_LONG_LIMIT: 2095 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; 2096 break; 2097 case WPI_TX_STATUS_FAIL_LIFE_EXPIRE: 2098 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED; 2099 break; 2100 default: 2101 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; 2102 break; 2103 } 2104 } 2105 2106 ieee80211_ratectl_tx_complete(ni, txs); 2107 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2108 2109 WPI_TXQ_STATE_LOCK(sc); 2110 if (--ring->queued > 0) 2111 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2112 else 2113 callout_stop(&sc->tx_timeout); 2114 WPI_TXQ_STATE_UNLOCK(sc); 2115 2116 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2117 } 2118 2119 /* 2120 * Process a "command done" firmware notification. This is where we wakeup 2121 * processes waiting for a synchronous command completion. 2122 */ 2123 static void 2124 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2125 { 2126 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2127 struct wpi_tx_data *data; 2128 struct wpi_tx_cmd *cmd; 2129 2130 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2131 "type %s len %d\n", desc->qid, desc->idx, 2132 desc->flags, wpi_cmd_str(desc->type), 2133 le32toh(desc->len)); 2134 2135 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2136 return; /* Not a command ack. */ 2137 2138 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2139 2140 data = &ring->data[desc->idx]; 2141 cmd = &ring->cmd[desc->idx]; 2142 2143 /* If the command was mapped in an mbuf, free it. 
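 * (Only commands too large for the ring's command slot are sent from an
 * mbuf; see wpi_cmd().)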
*/ 2144 if (data->m != NULL) { 2145 bus_dmamap_sync(ring->data_dmat, data->map, 2146 BUS_DMASYNC_POSTWRITE); 2147 bus_dmamap_unload(ring->data_dmat, data->map); 2148 m_freem(data->m); 2149 data->m = NULL; 2150 } 2151 2152 wakeup(cmd); 2153 2154 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2155 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2156 2157 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2158 BUS_DMASYNC_POSTREAD); 2159 2160 WPI_TXQ_LOCK(sc); 2161 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2162 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2163 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2164 } else { 2165 sc->sc_update_rx_ring = wpi_update_rx_ring; 2166 sc->sc_update_tx_ring = wpi_update_tx_ring; 2167 } 2168 WPI_TXQ_UNLOCK(sc); 2169 } 2170 } 2171 2172 static void 2173 wpi_notif_intr(struct wpi_softc *sc) 2174 { 2175 struct ieee80211com *ic = &sc->sc_ic; 2176 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2177 uint32_t hw; 2178 2179 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2180 BUS_DMASYNC_POSTREAD); 2181 2182 hw = le32toh(sc->shared->next) & 0xfff; 2183 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2184 2185 while (sc->rxq.cur != hw) { 2186 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2187 2188 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2189 struct wpi_rx_desc *desc; 2190 2191 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2192 BUS_DMASYNC_POSTREAD); 2193 desc = mtod(data->m, struct wpi_rx_desc *); 2194 2195 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2196 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2197 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2198 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2199 2200 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2201 /* Reply to a command. */ 2202 wpi_cmd_done(sc, desc); 2203 } 2204 2205 switch (desc->type) { 2206 case WPI_RX_DONE: 2207 /* An 802.11 frame has been received. */ 2208 wpi_rx_done(sc, desc, data); 2209 2210 if (__predict_false(sc->sc_running == 0)) { 2211 /* wpi_stop() was called. */ 2212 return; 2213 } 2214 2215 break; 2216 2217 case WPI_TX_DONE: 2218 /* An 802.11 frame has been transmitted. 
*/ 2219 wpi_tx_done(sc, desc); 2220 break; 2221 2222 case WPI_RX_STATISTICS: 2223 case WPI_BEACON_STATISTICS: 2224 wpi_rx_statistics(sc, desc, data); 2225 break; 2226 2227 case WPI_BEACON_MISSED: 2228 { 2229 struct wpi_beacon_missed *miss = 2230 (struct wpi_beacon_missed *)(desc + 1); 2231 uint32_t expected, misses, received, threshold; 2232 2233 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2234 BUS_DMASYNC_POSTREAD); 2235 2236 misses = le32toh(miss->consecutive); 2237 expected = le32toh(miss->expected); 2238 received = le32toh(miss->received); 2239 threshold = MAX(2, vap->iv_bmissthreshold); 2240 2241 DPRINTF(sc, WPI_DEBUG_BMISS, 2242 "%s: beacons missed %u(%u) (received %u/%u)\n", 2243 __func__, misses, le32toh(miss->total), received, 2244 expected); 2245 2246 if (misses >= threshold || 2247 (received == 0 && expected >= threshold)) { 2248 WPI_RXON_LOCK(sc); 2249 if (callout_pending(&sc->scan_timeout)) { 2250 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2251 0, 1); 2252 } 2253 WPI_RXON_UNLOCK(sc); 2254 if (vap->iv_state == IEEE80211_S_RUN && 2255 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2256 ieee80211_beacon_miss(ic); 2257 } 2258 2259 break; 2260 } 2261 #ifdef WPI_DEBUG 2262 case WPI_BEACON_SENT: 2263 { 2264 struct wpi_tx_stat *stat = 2265 (struct wpi_tx_stat *)(desc + 1); 2266 uint64_t *tsf = (uint64_t *)(stat + 1); 2267 uint32_t *mode = (uint32_t *)(tsf + 1); 2268 2269 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2270 BUS_DMASYNC_POSTREAD); 2271 2272 DPRINTF(sc, WPI_DEBUG_BEACON, 2273 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2274 "duration %u, status %x, tsf %ju, mode %x\n", 2275 stat->rtsfailcnt, stat->ackfailcnt, 2276 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2277 le32toh(stat->status), le64toh(*tsf), 2278 le32toh(*mode)); 2279 2280 break; 2281 } 2282 #endif 2283 case WPI_UC_READY: 2284 { 2285 struct wpi_ucode_info *uc = 2286 (struct wpi_ucode_info *)(desc + 1); 2287 2288 /* The microcontroller is ready. */ 2289 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2290 BUS_DMASYNC_POSTREAD); 2291 DPRINTF(sc, WPI_DEBUG_RESET, 2292 "microcode alive notification version=%d.%d " 2293 "subtype=%x alive=%x\n", uc->major, uc->minor, 2294 uc->subtype, le32toh(uc->valid)); 2295 2296 if (le32toh(uc->valid) != 1) { 2297 device_printf(sc->sc_dev, 2298 "microcontroller initialization failed\n"); 2299 wpi_stop_locked(sc); 2300 return; 2301 } 2302 /* Save the address of the error log in SRAM. 
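 * wpi_fatal_intr() uses it to dump the firmware error log after a
 * firmware panic.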
*/ 2303 sc->errptr = le32toh(uc->errptr); 2304 break; 2305 } 2306 case WPI_STATE_CHANGED: 2307 { 2308 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2309 BUS_DMASYNC_POSTREAD); 2310 2311 uint32_t *status = (uint32_t *)(desc + 1); 2312 2313 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2314 le32toh(*status)); 2315 2316 if (le32toh(*status) & 1) { 2317 WPI_NT_LOCK(sc); 2318 wpi_clear_node_table(sc); 2319 WPI_NT_UNLOCK(sc); 2320 ieee80211_runtask(ic, 2321 &sc->sc_radiooff_task); 2322 return; 2323 } 2324 break; 2325 } 2326 #ifdef WPI_DEBUG 2327 case WPI_START_SCAN: 2328 { 2329 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2330 BUS_DMASYNC_POSTREAD); 2331 2332 struct wpi_start_scan *scan = 2333 (struct wpi_start_scan *)(desc + 1); 2334 DPRINTF(sc, WPI_DEBUG_SCAN, 2335 "%s: scanning channel %d status %x\n", 2336 __func__, scan->chan, le32toh(scan->status)); 2337 2338 break; 2339 } 2340 #endif 2341 case WPI_STOP_SCAN: 2342 { 2343 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2344 BUS_DMASYNC_POSTREAD); 2345 2346 struct wpi_stop_scan *scan = 2347 (struct wpi_stop_scan *)(desc + 1); 2348 2349 DPRINTF(sc, WPI_DEBUG_SCAN, 2350 "scan finished nchan=%d status=%d chan=%d\n", 2351 scan->nchan, scan->status, scan->chan); 2352 2353 WPI_RXON_LOCK(sc); 2354 callout_stop(&sc->scan_timeout); 2355 WPI_RXON_UNLOCK(sc); 2356 if (scan->status == WPI_SCAN_ABORTED) 2357 ieee80211_cancel_scan(vap); 2358 else 2359 ieee80211_scan_next(vap); 2360 break; 2361 } 2362 } 2363 2364 if (sc->rxq.cur % 8 == 0) { 2365 /* Tell the firmware what we have processed. */ 2366 sc->sc_update_rx_ring(sc); 2367 } 2368 } 2369 } 2370 2371 /* 2372 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2373 * from power-down sleep mode. 2374 */ 2375 static void 2376 wpi_wakeup_intr(struct wpi_softc *sc) 2377 { 2378 int qid; 2379 2380 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2381 "%s: ucode wakeup from power-down sleep\n", __func__); 2382 2383 /* Wakeup RX and TX rings. 
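 * Flush any ring index updates that were deferred while the device
 * was asleep.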
*/ 2384 if (sc->rxq.update) { 2385 sc->rxq.update = 0; 2386 wpi_update_rx_ring(sc); 2387 } 2388 WPI_TXQ_LOCK(sc); 2389 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2390 struct wpi_tx_ring *ring = &sc->txq[qid]; 2391 2392 if (ring->update) { 2393 ring->update = 0; 2394 wpi_update_tx_ring(sc, ring); 2395 } 2396 } 2397 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2398 WPI_TXQ_UNLOCK(sc); 2399 } 2400 2401 /* 2402 * This function prints firmware registers 2403 */ 2404 #ifdef WPI_DEBUG 2405 static void 2406 wpi_debug_registers(struct wpi_softc *sc) 2407 { 2408 size_t i; 2409 static const uint32_t csr_tbl[] = { 2410 WPI_HW_IF_CONFIG, 2411 WPI_INT, 2412 WPI_INT_MASK, 2413 WPI_FH_INT, 2414 WPI_GPIO_IN, 2415 WPI_RESET, 2416 WPI_GP_CNTRL, 2417 WPI_EEPROM, 2418 WPI_EEPROM_GP, 2419 WPI_GIO, 2420 WPI_UCODE_GP1, 2421 WPI_UCODE_GP2, 2422 WPI_GIO_CHICKEN, 2423 WPI_ANA_PLL, 2424 WPI_DBG_HPET_MEM, 2425 }; 2426 static const uint32_t prph_tbl[] = { 2427 WPI_APMG_CLK_CTRL, 2428 WPI_APMG_PS, 2429 WPI_APMG_PCI_STT, 2430 WPI_APMG_RFKILL, 2431 }; 2432 2433 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2434 2435 for (i = 0; i < nitems(csr_tbl); i++) { 2436 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2437 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2438 2439 if ((i + 1) % 2 == 0) 2440 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2441 } 2442 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2443 2444 if (wpi_nic_lock(sc) == 0) { 2445 for (i = 0; i < nitems(prph_tbl); i++) { 2446 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2447 wpi_get_prph_string(prph_tbl[i]), 2448 wpi_prph_read(sc, prph_tbl[i])); 2449 2450 if ((i + 1) % 2 == 0) 2451 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2452 } 2453 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2454 wpi_nic_unlock(sc); 2455 } else { 2456 DPRINTF(sc, WPI_DEBUG_REGISTER, 2457 "Cannot access internal registers.\n"); 2458 } 2459 } 2460 #endif 2461 2462 /* 2463 * Dump the error log of the firmware when a firmware panic occurs. Although 2464 * we can't debug the firmware because it is neither open source nor free, it 2465 * can help us to identify certain classes of problems. 2466 */ 2467 static void 2468 wpi_fatal_intr(struct wpi_softc *sc) 2469 { 2470 struct wpi_fw_dump dump; 2471 uint32_t i, offset, count; 2472 2473 /* Check that the error log address is valid. */ 2474 if (sc->errptr < WPI_FW_DATA_BASE || 2475 sc->errptr + sizeof (dump) > 2476 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2477 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2478 sc->errptr); 2479 return; 2480 } 2481 if (wpi_nic_lock(sc) != 0) { 2482 printf("%s: could not read firmware error log\n", __func__); 2483 return; 2484 } 2485 /* Read number of entries in the log. */ 2486 count = wpi_mem_read(sc, sc->errptr); 2487 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2488 printf("%s: invalid count field (count = %u)\n", __func__, 2489 count); 2490 wpi_nic_unlock(sc); 2491 return; 2492 } 2493 /* Skip "count" field. */ 2494 offset = sc->errptr + sizeof (uint32_t); 2495 printf("firmware error log (count = %u):\n", count); 2496 for (i = 0; i < count; i++) { 2497 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2498 sizeof (dump) / sizeof (uint32_t)); 2499 2500 printf(" error type = \"%s\" (0x%08X)\n", 2501 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2502 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2503 dump.desc); 2504 printf(" error data = 0x%08X\n", 2505 dump.data); 2506 printf(" branch link = 0x%08X%08X\n", 2507 dump.blink[0], dump.blink[1]); 2508 printf(" interrupt link = 0x%08X%08X\n", 2509 dump.ilink[0], dump.ilink[1]); 2510 printf(" time = %u\n", dump.time); 2511 2512 offset += sizeof (dump); 2513 } 2514 wpi_nic_unlock(sc); 2515 /* Dump driver status (TX and RX rings) while we're here. */ 2516 printf("driver status:\n"); 2517 WPI_TXQ_LOCK(sc); 2518 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2519 struct wpi_tx_ring *ring = &sc->txq[i]; 2520 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2521 i, ring->qid, ring->cur, ring->queued); 2522 } 2523 WPI_TXQ_UNLOCK(sc); 2524 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2525 } 2526 2527 static void 2528 wpi_intr(void *arg) 2529 { 2530 struct wpi_softc *sc = arg; 2531 uint32_t r1, r2; 2532 2533 WPI_LOCK(sc); 2534 2535 /* Disable interrupts. */ 2536 WPI_WRITE(sc, WPI_INT_MASK, 0); 2537 2538 r1 = WPI_READ(sc, WPI_INT); 2539 2540 if (__predict_false(r1 == 0xffffffff || 2541 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2542 goto end; /* Hardware gone! */ 2543 2544 r2 = WPI_READ(sc, WPI_FH_INT); 2545 2546 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2547 r1, r2); 2548 2549 if (r1 == 0 && r2 == 0) 2550 goto done; /* Interrupt not for us. */ 2551 2552 /* Acknowledge interrupts. */ 2553 WPI_WRITE(sc, WPI_INT, r1); 2554 WPI_WRITE(sc, WPI_FH_INT, r2); 2555 2556 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2557 struct ieee80211com *ic = &sc->sc_ic; 2558 2559 device_printf(sc->sc_dev, "fatal firmware error\n"); 2560 #ifdef WPI_DEBUG 2561 wpi_debug_registers(sc); 2562 #endif 2563 wpi_fatal_intr(sc); 2564 DPRINTF(sc, WPI_DEBUG_HW, 2565 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2566 "(Hardware Error)"); 2567 ieee80211_restart_all(ic); 2568 goto end; 2569 } 2570 2571 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2572 (r2 & WPI_FH_INT_RX)) 2573 wpi_notif_intr(sc); 2574 2575 if (r1 & WPI_INT_ALIVE) 2576 wakeup(sc); /* Firmware is alive. */ 2577 2578 if (r1 & WPI_INT_WAKEUP) 2579 wpi_wakeup_intr(sc); 2580 2581 done: 2582 /* Re-enable interrupts. 
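 * (skipped when the interface has been stopped in the meantime).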
*/ 2583 if (__predict_true(sc->sc_running)) 2584 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2585 2586 end: WPI_UNLOCK(sc); 2587 } 2588 2589 static void 2590 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) 2591 { 2592 struct wpi_tx_ring *ring; 2593 struct wpi_tx_data *data; 2594 uint8_t cur; 2595 2596 WPI_TXQ_LOCK(sc); 2597 ring = &sc->txq[ac]; 2598 2599 while (ring->pending != 0) { 2600 ring->pending--; 2601 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2602 data = &ring->data[cur]; 2603 2604 bus_dmamap_sync(ring->data_dmat, data->map, 2605 BUS_DMASYNC_POSTWRITE); 2606 bus_dmamap_unload(ring->data_dmat, data->map); 2607 m_freem(data->m); 2608 data->m = NULL; 2609 2610 ieee80211_node_decref(data->ni); 2611 data->ni = NULL; 2612 } 2613 2614 WPI_TXQ_UNLOCK(sc); 2615 } 2616 2617 static int 2618 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2619 { 2620 struct ieee80211_frame *wh; 2621 struct wpi_tx_cmd *cmd; 2622 struct wpi_tx_data *data; 2623 struct wpi_tx_desc *desc; 2624 struct wpi_tx_ring *ring; 2625 struct mbuf *m1; 2626 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2627 uint8_t cur, pad; 2628 uint16_t hdrlen; 2629 int error, i, nsegs, totlen, frag; 2630 2631 WPI_TXQ_LOCK(sc); 2632 2633 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2634 2635 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2636 2637 if (__predict_false(sc->sc_running == 0)) { 2638 /* wpi_stop() was called */ 2639 error = ENETDOWN; 2640 goto end; 2641 } 2642 2643 wh = mtod(buf->m, struct ieee80211_frame *); 2644 hdrlen = ieee80211_anyhdrsize(wh); 2645 totlen = buf->m->m_pkthdr.len; 2646 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); 2647 2648 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2649 error = EINVAL; 2650 goto end; 2651 } 2652 2653 if (hdrlen & 3) { 2654 /* First segment length must be a multiple of 4. */ 2655 pad = 4 - (hdrlen & 3); 2656 } else 2657 pad = 0; 2658 2659 ring = &sc->txq[buf->ac]; 2660 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2661 desc = &ring->desc[cur]; 2662 data = &ring->data[cur]; 2663 2664 /* Prepare TX firmware command. */ 2665 cmd = &ring->cmd[cur]; 2666 cmd->code = buf->code; 2667 cmd->flags = 0; 2668 cmd->qid = ring->qid; 2669 cmd->idx = cur; 2670 2671 memcpy(cmd->data, buf->data, buf->size); 2672 2673 /* Save and trim IEEE802.11 header. */ 2674 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2675 m_adj(buf->m, hdrlen); 2676 2677 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2678 segs, &nsegs, BUS_DMA_NOWAIT); 2679 if (error != 0 && error != EFBIG) { 2680 device_printf(sc->sc_dev, 2681 "%s: can't map mbuf (error %d)\n", __func__, error); 2682 goto end; 2683 } 2684 if (error != 0) { 2685 /* Too many DMA segments, linearize mbuf. */ 2686 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2687 if (m1 == NULL) { 2688 device_printf(sc->sc_dev, 2689 "%s: could not defrag mbuf\n", __func__); 2690 error = ENOBUFS; 2691 goto end; 2692 } 2693 buf->m = m1; 2694 2695 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2696 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2697 if (__predict_false(error != 0)) { 2698 /* XXX fix this (applicable to the iwn(4) too) */ 2699 /* 2700 * NB: Do not return error; 2701 * original mbuf does not exist anymore. 
2702 */ 2703 device_printf(sc->sc_dev, 2704 "%s: can't map mbuf (error %d)\n", __func__, 2705 error); 2706 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2707 if_inc_counter(buf->ni->ni_vap->iv_ifp, 2708 IFCOUNTER_OERRORS, 1); 2709 if (!frag) 2710 ieee80211_free_node(buf->ni); 2711 } 2712 m_freem(buf->m); 2713 error = 0; 2714 goto end; 2715 } 2716 } 2717 2718 KASSERT(nsegs < WPI_MAX_SCATTER, 2719 ("too many DMA segments, nsegs (%d) should be less than %d", 2720 nsegs, WPI_MAX_SCATTER)); 2721 2722 data->m = buf->m; 2723 data->ni = buf->ni; 2724 2725 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2726 __func__, ring->qid, cur, totlen, nsegs); 2727 2728 /* Fill TX descriptor. */ 2729 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2730 /* First DMA segment is used by the TX command. */ 2731 desc->segs[0].addr = htole32(data->cmd_paddr); 2732 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2733 /* Other DMA segments are for data payload. */ 2734 seg = &segs[0]; 2735 for (i = 1; i <= nsegs; i++) { 2736 desc->segs[i].addr = htole32(seg->ds_addr); 2737 desc->segs[i].len = htole32(seg->ds_len); 2738 seg++; 2739 } 2740 2741 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2742 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2743 BUS_DMASYNC_PREWRITE); 2744 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2745 BUS_DMASYNC_PREWRITE); 2746 2747 ring->pending += 1; 2748 2749 if (!frag) { 2750 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2751 WPI_TXQ_STATE_LOCK(sc); 2752 ring->queued += ring->pending; 2753 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, 2754 sc); 2755 WPI_TXQ_STATE_UNLOCK(sc); 2756 } 2757 2758 /* Kick TX ring. */ 2759 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2760 ring->pending = 0; 2761 sc->sc_update_tx_ring(sc, ring); 2762 } else 2763 ieee80211_node_incref(data->ni); 2764 2765 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 2766 __func__); 2767 2768 WPI_TXQ_UNLOCK(sc); 2769 2770 return (error); 2771 } 2772 2773 /* 2774 * Construct the data packet for a transmit buffer. 2775 */ 2776 static int 2777 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2778 { 2779 const struct ieee80211_txparam *tp; 2780 struct ieee80211vap *vap = ni->ni_vap; 2781 struct ieee80211com *ic = ni->ni_ic; 2782 struct wpi_node *wn = WPI_NODE(ni); 2783 struct ieee80211_channel *chan; 2784 struct ieee80211_frame *wh; 2785 struct ieee80211_key *k = NULL; 2786 struct wpi_buf tx_data; 2787 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2788 uint32_t flags; 2789 uint16_t ac, qos; 2790 uint8_t tid, type, rate; 2791 int swcrypt, ismcast, totlen; 2792 2793 wh = mtod(m, struct ieee80211_frame *); 2794 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2795 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2796 swcrypt = 1; 2797 2798 /* Select EDCA Access Category and TX ring for this frame. */ 2799 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2800 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2801 tid = qos & IEEE80211_QOS_TID; 2802 } else { 2803 qos = 0; 2804 tid = 0; 2805 } 2806 ac = M_WME_GETAC(m); 2807 2808 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2809 ni->ni_chan : ic->ic_curchan; 2810 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2811 2812 /* Choose a TX rate index. 
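 * Management, multicast and EAPOL frames use fixed rates; all other
 * frames ask the rate control module.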
*/ 2813 if (type == IEEE80211_FC0_TYPE_MGT) 2814 rate = tp->mgmtrate; 2815 else if (ismcast) 2816 rate = tp->mcastrate; 2817 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2818 rate = tp->ucastrate; 2819 else if (m->m_flags & M_EAPOL) 2820 rate = tp->mgmtrate; 2821 else { 2822 /* XXX pass pktlen */ 2823 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2824 rate = ni->ni_txrate; 2825 } 2826 2827 /* Encrypt the frame if need be. */ 2828 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2829 /* Retrieve key for TX. */ 2830 k = ieee80211_crypto_encap(ni, m); 2831 if (k == NULL) 2832 return (ENOBUFS); 2833 2834 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2835 2836 /* 802.11 header may have moved. */ 2837 wh = mtod(m, struct ieee80211_frame *); 2838 } 2839 totlen = m->m_pkthdr.len; 2840 2841 if (ieee80211_radiotap_active_vap(vap)) { 2842 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2843 2844 tap->wt_flags = 0; 2845 tap->wt_rate = rate; 2846 if (k != NULL) 2847 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2848 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2849 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2850 2851 ieee80211_radiotap_tx(vap, m); 2852 } 2853 2854 flags = 0; 2855 if (!ismcast) { 2856 /* Unicast frame, check if an ACK is expected. */ 2857 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2858 IEEE80211_QOS_ACKPOLICY_NOACK) 2859 flags |= WPI_TX_NEED_ACK; 2860 } 2861 2862 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2863 flags |= WPI_TX_AUTO_SEQ; 2864 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2865 flags |= WPI_TX_MORE_FRAG; 2866 2867 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2868 if (!ismcast) { 2869 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2870 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2871 flags |= WPI_TX_NEED_RTS; 2872 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2873 WPI_RATE_IS_OFDM(rate)) { 2874 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2875 flags |= WPI_TX_NEED_CTS; 2876 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2877 flags |= WPI_TX_NEED_RTS; 2878 } 2879 2880 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2881 flags |= WPI_TX_FULL_TXOP; 2882 } 2883 2884 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2885 if (type == IEEE80211_FC0_TYPE_MGT) { 2886 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2887 2888 /* Tell HW to set timestamp in probe responses. 
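 * (Re)association requests also get a slightly longer timeout than
 * other management frames.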
*/ 2889 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2890 flags |= WPI_TX_INSERT_TSTAMP; 2891 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2892 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2893 tx->timeout = htole16(3); 2894 else 2895 tx->timeout = htole16(2); 2896 } 2897 2898 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2899 tx->id = WPI_ID_BROADCAST; 2900 else { 2901 if (wn->id == WPI_ID_UNDEFINED) { 2902 device_printf(sc->sc_dev, 2903 "%s: undefined node id\n", __func__); 2904 return (EINVAL); 2905 } 2906 2907 tx->id = wn->id; 2908 } 2909 2910 if (!swcrypt) { 2911 switch (k->wk_cipher->ic_cipher) { 2912 case IEEE80211_CIPHER_AES_CCM: 2913 tx->security = WPI_CIPHER_CCMP; 2914 break; 2915 2916 default: 2917 break; 2918 } 2919 2920 memcpy(tx->key, k->wk_key, k->wk_keylen); 2921 } 2922 2923 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 2924 struct mbuf *next = m->m_nextpkt; 2925 2926 tx->lnext = htole16(next->m_pkthdr.len); 2927 tx->fnext = htole32(tx->security | 2928 (flags & WPI_TX_NEED_ACK) | 2929 WPI_NEXT_STA_ID(tx->id)); 2930 } 2931 2932 tx->len = htole16(totlen); 2933 tx->flags = htole32(flags); 2934 tx->plcp = rate2plcp(rate); 2935 tx->tid = tid; 2936 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2937 tx->ofdm_mask = 0xff; 2938 tx->cck_mask = 0x0f; 2939 tx->rts_ntries = 7; 2940 tx->data_ntries = tp->maxretry; 2941 2942 tx_data.ni = ni; 2943 tx_data.m = m; 2944 tx_data.size = sizeof(struct wpi_cmd_data); 2945 tx_data.code = WPI_CMD_TX_DATA; 2946 tx_data.ac = ac; 2947 2948 return wpi_cmd2(sc, &tx_data); 2949 } 2950 2951 static int 2952 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2953 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2954 { 2955 struct ieee80211vap *vap = ni->ni_vap; 2956 struct ieee80211_key *k = NULL; 2957 struct ieee80211_frame *wh; 2958 struct wpi_buf tx_data; 2959 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2960 uint32_t flags; 2961 uint8_t ac, type, rate; 2962 int swcrypt, totlen; 2963 2964 wh = mtod(m, struct ieee80211_frame *); 2965 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2966 swcrypt = 1; 2967 2968 ac = params->ibp_pri & 3; 2969 2970 /* Choose a TX rate index. */ 2971 rate = params->ibp_rate0; 2972 2973 flags = 0; 2974 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2975 flags |= WPI_TX_AUTO_SEQ; 2976 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2977 flags |= WPI_TX_NEED_ACK; 2978 if (params->ibp_flags & IEEE80211_BPF_RTS) 2979 flags |= WPI_TX_NEED_RTS; 2980 if (params->ibp_flags & IEEE80211_BPF_CTS) 2981 flags |= WPI_TX_NEED_CTS; 2982 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2983 flags |= WPI_TX_FULL_TXOP; 2984 2985 /* Encrypt the frame if need be. */ 2986 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2987 /* Retrieve key for TX. */ 2988 k = ieee80211_crypto_encap(ni, m); 2989 if (k == NULL) 2990 return (ENOBUFS); 2991 2992 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2993 2994 /* 802.11 header may have moved. 
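 * (ieee80211_crypto_encap() may have inserted a cipher header.)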
*/ 2995 wh = mtod(m, struct ieee80211_frame *); 2996 } 2997 totlen = m->m_pkthdr.len; 2998 2999 if (ieee80211_radiotap_active_vap(vap)) { 3000 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 3001 3002 tap->wt_flags = 0; 3003 tap->wt_rate = rate; 3004 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 3005 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3006 3007 ieee80211_radiotap_tx(vap, m); 3008 } 3009 3010 memset(tx, 0, sizeof (struct wpi_cmd_data)); 3011 if (type == IEEE80211_FC0_TYPE_MGT) { 3012 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3013 3014 /* Tell HW to set timestamp in probe responses. */ 3015 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3016 flags |= WPI_TX_INSERT_TSTAMP; 3017 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3018 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3019 tx->timeout = htole16(3); 3020 else 3021 tx->timeout = htole16(2); 3022 } 3023 3024 if (!swcrypt) { 3025 switch (k->wk_cipher->ic_cipher) { 3026 case IEEE80211_CIPHER_AES_CCM: 3027 tx->security = WPI_CIPHER_CCMP; 3028 break; 3029 3030 default: 3031 break; 3032 } 3033 3034 memcpy(tx->key, k->wk_key, k->wk_keylen); 3035 } 3036 3037 tx->len = htole16(totlen); 3038 tx->flags = htole32(flags); 3039 tx->plcp = rate2plcp(rate); 3040 tx->id = WPI_ID_BROADCAST; 3041 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3042 tx->rts_ntries = params->ibp_try1; 3043 tx->data_ntries = params->ibp_try0; 3044 3045 tx_data.ni = ni; 3046 tx_data.m = m; 3047 tx_data.size = sizeof(struct wpi_cmd_data); 3048 tx_data.code = WPI_CMD_TX_DATA; 3049 tx_data.ac = ac; 3050 3051 return wpi_cmd2(sc, &tx_data); 3052 } 3053 3054 static __inline int 3055 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) 3056 { 3057 struct wpi_tx_ring *ring = &sc->txq[ac]; 3058 int retval; 3059 3060 WPI_TXQ_STATE_LOCK(sc); 3061 retval = WPI_TX_RING_HIMARK - ring->queued; 3062 WPI_TXQ_STATE_UNLOCK(sc); 3063 3064 return retval; 3065 } 3066 3067 static int 3068 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3069 const struct ieee80211_bpf_params *params) 3070 { 3071 struct ieee80211com *ic = ni->ni_ic; 3072 struct wpi_softc *sc = ic->ic_softc; 3073 uint16_t ac; 3074 int error = 0; 3075 3076 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3077 3078 ac = M_WME_GETAC(m); 3079 3080 WPI_TX_LOCK(sc); 3081 3082 /* NB: no fragments here */ 3083 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { 3084 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3085 goto unlock; 3086 } 3087 3088 if (params == NULL) { 3089 /* 3090 * Legacy path; interpret frame contents to decide 3091 * precisely how to send the frame. 3092 */ 3093 error = wpi_tx_data(sc, m, ni); 3094 } else { 3095 /* 3096 * Caller supplied explicit parameters to use in 3097 * sending the frame. 3098 */ 3099 error = wpi_tx_data_raw(sc, m, ni, params); 3100 } 3101 3102 unlock: WPI_TX_UNLOCK(sc); 3103 3104 if (error != 0) { 3105 m_freem(m); 3106 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3107 3108 return error; 3109 } 3110 3111 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3112 3113 return 0; 3114 } 3115 3116 static int 3117 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3118 { 3119 struct wpi_softc *sc = ic->ic_softc; 3120 struct ieee80211_node *ni; 3121 struct mbuf *mnext; 3122 uint16_t ac; 3123 int error, nmbufs; 3124 3125 WPI_TX_LOCK(sc); 3126 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3127 3128 /* Check if interface is up & running. 
*/ 3129 if (__predict_false(sc->sc_running == 0)) { 3130 error = ENXIO; 3131 goto unlock; 3132 } 3133 3134 nmbufs = 1; 3135 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) 3136 nmbufs++; 3137 3138 /* Check for available space. */ 3139 ac = M_WME_GETAC(m); 3140 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { 3141 error = ENOBUFS; 3142 goto unlock; 3143 } 3144 3145 error = 0; 3146 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3147 do { 3148 mnext = m->m_nextpkt; 3149 if (wpi_tx_data(sc, m, ni) != 0) { 3150 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 3151 nmbufs); 3152 wpi_free_txfrags(sc, ac); 3153 ieee80211_free_mbuf(m); 3154 ieee80211_free_node(ni); 3155 break; 3156 } 3157 } while((m = mnext) != NULL); 3158 3159 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3160 3161 unlock: WPI_TX_UNLOCK(sc); 3162 3163 return (error); 3164 } 3165 3166 static void 3167 wpi_watchdog_rfkill(void *arg) 3168 { 3169 struct wpi_softc *sc = arg; 3170 struct ieee80211com *ic = &sc->sc_ic; 3171 3172 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3173 3174 /* No need to lock firmware memory. */ 3175 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3176 /* Radio kill switch is still off. */ 3177 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3178 sc); 3179 } else 3180 ieee80211_runtask(ic, &sc->sc_radioon_task); 3181 } 3182 3183 static void 3184 wpi_scan_timeout(void *arg) 3185 { 3186 struct wpi_softc *sc = arg; 3187 struct ieee80211com *ic = &sc->sc_ic; 3188 3189 ic_printf(ic, "scan timeout\n"); 3190 ieee80211_restart_all(ic); 3191 } 3192 3193 static void 3194 wpi_tx_timeout(void *arg) 3195 { 3196 struct wpi_softc *sc = arg; 3197 struct ieee80211com *ic = &sc->sc_ic; 3198 3199 ic_printf(ic, "device timeout\n"); 3200 ieee80211_restart_all(ic); 3201 } 3202 3203 static void 3204 wpi_parent(struct ieee80211com *ic) 3205 { 3206 struct wpi_softc *sc = ic->ic_softc; 3207 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3208 3209 if (ic->ic_nrunning > 0) { 3210 if (wpi_init(sc) == 0) { 3211 ieee80211_notify_radio(ic, 1); 3212 ieee80211_start_all(ic); 3213 } else { 3214 ieee80211_notify_radio(ic, 0); 3215 ieee80211_stop(vap); 3216 } 3217 } else { 3218 ieee80211_notify_radio(ic, 0); 3219 wpi_stop(sc); 3220 } 3221 } 3222 3223 /* 3224 * Send a command to the firmware. 3225 */ 3226 static int 3227 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, 3228 int async) 3229 { 3230 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3231 struct wpi_tx_desc *desc; 3232 struct wpi_tx_data *data; 3233 struct wpi_tx_cmd *cmd; 3234 struct mbuf *m; 3235 bus_addr_t paddr; 3236 uint16_t totlen; 3237 int error; 3238 3239 WPI_TXQ_LOCK(sc); 3240 3241 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3242 3243 if (__predict_false(sc->sc_running == 0)) { 3244 /* wpi_stop() was called */ 3245 if (code == WPI_CMD_SCAN) 3246 error = ENETDOWN; 3247 else 3248 error = 0; 3249 3250 goto fail; 3251 } 3252 3253 if (async == 0) 3254 WPI_LOCK_ASSERT(sc); 3255 3256 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", 3257 __func__, wpi_cmd_str(code), size, async); 3258 3259 desc = &ring->desc[ring->cur]; 3260 data = &ring->data[ring->cur]; 3261 totlen = 4 + size; 3262 3263 if (size > sizeof cmd->data) { 3264 /* Command is too large to fit in a descriptor. 
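 * Copy it into a jumbo mbuf and DMA-map that instead of the
 * preallocated command slot.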
*/ 3265 if (totlen > MCLBYTES) { 3266 error = EINVAL; 3267 goto fail; 3268 } 3269 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3270 if (m == NULL) { 3271 error = ENOMEM; 3272 goto fail; 3273 } 3274 cmd = mtod(m, struct wpi_tx_cmd *); 3275 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3276 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3277 if (error != 0) { 3278 m_freem(m); 3279 goto fail; 3280 } 3281 data->m = m; 3282 } else { 3283 cmd = &ring->cmd[ring->cur]; 3284 paddr = data->cmd_paddr; 3285 } 3286 3287 cmd->code = code; 3288 cmd->flags = 0; 3289 cmd->qid = ring->qid; 3290 cmd->idx = ring->cur; 3291 memcpy(cmd->data, buf, size); 3292 3293 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3294 desc->segs[0].addr = htole32(paddr); 3295 desc->segs[0].len = htole32(totlen); 3296 3297 if (size > sizeof cmd->data) { 3298 bus_dmamap_sync(ring->data_dmat, data->map, 3299 BUS_DMASYNC_PREWRITE); 3300 } else { 3301 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3302 BUS_DMASYNC_PREWRITE); 3303 } 3304 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3305 BUS_DMASYNC_PREWRITE); 3306 3307 /* Kick command ring. */ 3308 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3309 sc->sc_update_tx_ring(sc, ring); 3310 3311 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3312 3313 WPI_TXQ_UNLOCK(sc); 3314 3315 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3316 3317 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3318 3319 WPI_TXQ_UNLOCK(sc); 3320 3321 return error; 3322 } 3323 3324 /* 3325 * Configure HW multi-rate retries. 3326 */ 3327 static int 3328 wpi_mrr_setup(struct wpi_softc *sc) 3329 { 3330 struct ieee80211com *ic = &sc->sc_ic; 3331 struct wpi_mrr_setup mrr; 3332 uint8_t i; 3333 int error; 3334 3335 /* CCK rates (not used with 802.11a). */ 3336 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3337 mrr.rates[i].flags = 0; 3338 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3339 /* Fallback to the immediate lower CCK rate (if any.) */ 3340 mrr.rates[i].next = 3341 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3342 /* Try twice at this rate before falling back to "next". */ 3343 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3344 } 3345 /* OFDM rates (not used with 802.11b). */ 3346 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3347 mrr.rates[i].flags = 0; 3348 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3349 /* Fallback to the immediate lower rate (if any.) */ 3350 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3351 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3352 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3353 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3354 i - 1; 3355 /* Try twice at this rate before falling back to "next". */ 3356 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3357 } 3358 /* Setup MRR for control frames. */ 3359 mrr.which = htole32(WPI_MRR_CTL); 3360 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3361 if (error != 0) { 3362 device_printf(sc->sc_dev, 3363 "could not setup MRR for control frames\n"); 3364 return error; 3365 } 3366 /* Setup MRR for data frames. 
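 * Reuse the same fallback table, this time for the data-frame selector.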
*/ 3367 mrr.which = htole32(WPI_MRR_DATA); 3368 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3369 if (error != 0) { 3370 device_printf(sc->sc_dev, 3371 "could not setup MRR for data frames\n"); 3372 return error; 3373 } 3374 return 0; 3375 } 3376 3377 static int 3378 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3379 { 3380 struct ieee80211com *ic = ni->ni_ic; 3381 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3382 struct wpi_node *wn = WPI_NODE(ni); 3383 struct wpi_node_info node; 3384 int error; 3385 3386 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3387 3388 if (wn->id == WPI_ID_UNDEFINED) 3389 return EINVAL; 3390 3391 memset(&node, 0, sizeof node); 3392 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3393 node.id = wn->id; 3394 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3395 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3396 node.action = htole32(WPI_ACTION_SET_RATE); 3397 node.antenna = WPI_ANTENNA_BOTH; 3398 3399 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3400 wn->id, ether_sprintf(ni->ni_macaddr)); 3401 3402 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3403 if (error != 0) { 3404 device_printf(sc->sc_dev, 3405 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3406 error); 3407 return error; 3408 } 3409 3410 if (wvp->wv_gtk != 0) { 3411 error = wpi_set_global_keys(ni); 3412 if (error != 0) { 3413 device_printf(sc->sc_dev, 3414 "%s: error while setting global keys\n", __func__); 3415 return ENXIO; 3416 } 3417 } 3418 3419 return 0; 3420 } 3421 3422 /* 3423 * Broadcast node is used to send group-addressed and management frames. 3424 */ 3425 static int 3426 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3427 { 3428 struct ieee80211com *ic = &sc->sc_ic; 3429 struct wpi_node_info node; 3430 3431 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3432 3433 memset(&node, 0, sizeof node); 3434 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3435 node.id = WPI_ID_BROADCAST; 3436 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3437 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3438 node.action = htole32(WPI_ACTION_SET_RATE); 3439 node.antenna = WPI_ANTENNA_BOTH; 3440 3441 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3442 3443 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3444 } 3445 3446 static int 3447 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3448 { 3449 struct wpi_node *wn = WPI_NODE(ni); 3450 int error; 3451 3452 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3453 3454 wn->id = wpi_add_node_entry_sta(sc); 3455 3456 if ((error = wpi_add_node(sc, ni)) != 0) { 3457 wpi_del_node_entry(sc, wn->id); 3458 wn->id = WPI_ID_UNDEFINED; 3459 return error; 3460 } 3461 3462 return 0; 3463 } 3464 3465 static int 3466 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3467 { 3468 struct wpi_node *wn = WPI_NODE(ni); 3469 int error; 3470 3471 KASSERT(wn->id == WPI_ID_UNDEFINED, 3472 ("the node %d was added before", wn->id)); 3473 3474 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3475 3476 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3477 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3478 return ENOMEM; 3479 } 3480 3481 if ((error = wpi_add_node(sc, ni)) != 0) { 3482 wpi_del_node_entry(sc, wn->id); 3483 wn->id = WPI_ID_UNDEFINED; 3484 return error; 3485 } 3486 3487 return 0; 3488 } 3489 3490 static void 3491 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3492 { 3493 struct wpi_node *wn = WPI_NODE(ni); 3494 struct wpi_cmd_del_node node; 3495 int error; 3496 3497 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3498 3499 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3500 3501 memset(&node, 0, sizeof node); 3502 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3503 node.count = 1; 3504 3505 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3506 wn->id, ether_sprintf(ni->ni_macaddr)); 3507 3508 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3509 if (error != 0) { 3510 device_printf(sc->sc_dev, 3511 "%s: could not delete node %u, error %d\n", __func__, 3512 wn->id, error); 3513 } 3514 } 3515 3516 static int 3517 wpi_updateedca(struct ieee80211com *ic) 3518 { 3519 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3520 struct wpi_softc *sc = ic->ic_softc; 3521 struct wpi_edca_params cmd; 3522 int aci, error; 3523 3524 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3525 3526 memset(&cmd, 0, sizeof cmd); 3527 cmd.flags = htole32(WPI_EDCA_UPDATE); 3528 for (aci = 0; aci < WME_NUM_AC; aci++) { 3529 const struct wmeParams *ac = 3530 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3531 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3532 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3533 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3534 cmd.ac[aci].txoplimit = 3535 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3536 3537 DPRINTF(sc, WPI_DEBUG_EDCA, 3538 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3539 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3540 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3541 cmd.ac[aci].txoplimit); 3542 } 3543 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3544 3545 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3546 3547 return error; 3548 #undef WPI_EXP2 3549 } 3550 3551 static void 3552 wpi_set_promisc(struct wpi_softc *sc) 3553 { 3554 struct ieee80211com *ic = &sc->sc_ic; 3555 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3556 uint32_t promisc_filter; 3557 3558 promisc_filter = WPI_FILTER_CTL; 3559 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3560 promisc_filter |= WPI_FILTER_PROMISC; 3561 3562 if (ic->ic_promisc > 0) 3563 sc->rxon.filter |= htole32(promisc_filter); 3564 else 3565 sc->rxon.filter &= ~htole32(promisc_filter); 3566 } 3567 3568 static void 3569 wpi_update_promisc(struct ieee80211com *ic) 3570 { 3571 struct wpi_softc *sc = ic->ic_softc; 3572 3573 WPI_LOCK(sc); 3574 if (sc->sc_running == 0) { 3575 WPI_UNLOCK(sc); 3576 return; 3577 } 3578 WPI_UNLOCK(sc); 3579 3580 WPI_RXON_LOCK(sc); 3581 wpi_set_promisc(sc); 3582 3583 if (wpi_send_rxon(sc, 1, 1) != 0) { 3584 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3585 __func__); 3586 } 3587 WPI_RXON_UNLOCK(sc); 3588 } 3589 3590 static void 3591 wpi_update_mcast(struct ieee80211com *ic) 3592 { 3593 /* Ignore */ 3594 } 3595 3596 static void 3597 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3598 { 3599 struct wpi_cmd_led led; 3600 3601 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3602 3603 led.which = which; 3604 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3605 led.off = off; 3606 led.on = on; 3607 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3608 } 3609 3610 static int 3611 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3612 { 3613 struct wpi_cmd_timing cmd; 3614 uint64_t val, mod; 3615 3616 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3617 3618 memset(&cmd, 0, sizeof cmd); 3619 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3620 cmd.bintval = htole16(ni->ni_intval); 3621 cmd.lintval = htole16(10); 3622 3623 /* Compute remaining time until next beacon. */ 3624 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3625 mod = le64toh(cmd.tstamp) % val; 3626 cmd.binitval = htole32((uint32_t)(val - mod)); 3627 3628 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3629 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3630 3631 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3632 } 3633 3634 /* 3635 * This function is called periodically (every 60 seconds) to adjust output 3636 * power to temperature changes. 3637 */ 3638 static void 3639 wpi_power_calibration(struct wpi_softc *sc) 3640 { 3641 int temp; 3642 3643 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3644 3645 /* Update sensor data. */ 3646 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3647 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3648 3649 /* Sanity-check read value. */ 3650 if (temp < -260 || temp > 25) { 3651 /* This can't be correct, ignore. */ 3652 DPRINTF(sc, WPI_DEBUG_TEMP, 3653 "out-of-range temperature reported: %d\n", temp); 3654 return; 3655 } 3656 3657 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3658 3659 /* Adjust Tx power if need be. */ 3660 if (abs(temp - sc->temp) <= 6) 3661 return; 3662 3663 sc->temp = temp; 3664 3665 if (wpi_set_txpower(sc, 1) != 0) { 3666 /* just warn, too bad for the automatic calibration... */ 3667 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3668 } 3669 } 3670 3671 /* 3672 * Set TX power for current channel. 3673 */ 3674 static int 3675 wpi_set_txpower(struct wpi_softc *sc, int async) 3676 { 3677 struct wpi_power_group *group; 3678 struct wpi_cmd_txpower cmd; 3679 uint8_t chan; 3680 int idx, is_chan_5ghz, i; 3681 3682 /* Retrieve current channel from last RXON. 
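 * The band is derived from the RXON 2GHz flag rather than from the
 * channel number.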
*/ 3683 chan = sc->rxon.chan; 3684 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3685 3686 /* Find the TX power group to which this channel belongs. */ 3687 if (is_chan_5ghz) { 3688 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3689 if (chan <= group->chan) 3690 break; 3691 } else 3692 group = &sc->groups[0]; 3693 3694 memset(&cmd, 0, sizeof cmd); 3695 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3696 cmd.chan = htole16(chan); 3697 3698 /* Set TX power for all OFDM and CCK rates. */ 3699 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3700 /* Retrieve TX power for this channel/rate. */ 3701 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3702 3703 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3704 3705 if (is_chan_5ghz) { 3706 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3707 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3708 } else { 3709 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3710 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3711 } 3712 DPRINTF(sc, WPI_DEBUG_TEMP, 3713 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3714 } 3715 3716 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3717 } 3718 3719 /* 3720 * Determine Tx power index for a given channel/rate combination. 3721 * This takes into account the regulatory information from EEPROM and the 3722 * current temperature. 3723 */ 3724 static int 3725 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3726 uint8_t chan, int is_chan_5ghz, int ridx) 3727 { 3728 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3729 #define fdivround(a, b, n) \ 3730 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3731 3732 /* Linear interpolation. */ 3733 #define interpolate(x, x1, y1, x2, y2, n) \ 3734 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3735 3736 struct wpi_power_sample *sample; 3737 int pwr, idx; 3738 3739 /* Default TX power is group maximum TX power minus 3dB. */ 3740 pwr = group->maxpwr / 2; 3741 3742 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3743 switch (ridx) { 3744 case WPI_RIDX_OFDM36: 3745 pwr -= is_chan_5ghz ? 5 : 0; 3746 break; 3747 case WPI_RIDX_OFDM48: 3748 pwr -= is_chan_5ghz ? 10 : 7; 3749 break; 3750 case WPI_RIDX_OFDM54: 3751 pwr -= is_chan_5ghz ? 12 : 9; 3752 break; 3753 } 3754 3755 /* Never exceed the channel maximum allowed TX power. */ 3756 pwr = min(pwr, sc->maxpwr[chan]); 3757 3758 /* Retrieve TX power index into gain tables from samples. */ 3759 for (sample = group->samples; sample < &group->samples[3]; sample++) 3760 if (pwr > sample[1].power) 3761 break; 3762 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3763 idx = interpolate(pwr, sample[0].power, sample[0].index, 3764 sample[1].power, sample[1].index, 19); 3765 3766 /*- 3767 * Adjust power index based on current temperature: 3768 * - if cooler than factory-calibrated: decrease output power 3769 * - if warmer than factory-calibrated: increase output power 3770 */ 3771 idx -= (sc->temp - group->temp) * 11 / 100; 3772 3773 /* Decrease TX power for CCK rates (-5dB). */ 3774 if (ridx >= WPI_RIDX_CCK1) 3775 idx += 10; 3776 3777 /* Make sure idx stays in a valid range. */ 3778 if (idx < 0) 3779 return 0; 3780 if (idx > WPI_MAX_PWR_INDEX) 3781 return WPI_MAX_PWR_INDEX; 3782 return idx; 3783 3784 #undef interpolate 3785 #undef fdivround 3786 } 3787 3788 /* 3789 * Set STA mode power saving level (between 0 and 5). 
3790 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3791 */ 3792 static int 3793 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3794 { 3795 struct wpi_pmgt_cmd cmd; 3796 const struct wpi_pmgt *pmgt; 3797 uint32_t max, reg; 3798 uint8_t skip_dtim; 3799 int i; 3800 3801 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3802 "%s: dtim=%d, level=%d, async=%d\n", 3803 __func__, dtim, level, async); 3804 3805 /* Select which PS parameters to use. */ 3806 if (dtim <= 10) 3807 pmgt = &wpi_pmgt[0][level]; 3808 else 3809 pmgt = &wpi_pmgt[1][level]; 3810 3811 memset(&cmd, 0, sizeof cmd); 3812 if (level != 0) /* not CAM */ 3813 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3814 /* Retrieve PCIe Active State Power Management (ASPM). */ 3815 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 3816 if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */ 3817 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3818 3819 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3820 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3821 3822 if (dtim == 0) { 3823 dtim = 1; 3824 skip_dtim = 0; 3825 } else 3826 skip_dtim = pmgt->skip_dtim; 3827 3828 if (skip_dtim != 0) { 3829 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3830 max = pmgt->intval[4]; 3831 if (max == (uint32_t)-1) 3832 max = dtim * (skip_dtim + 1); 3833 else if (max > dtim) 3834 max = rounddown(max, dtim); 3835 } else 3836 max = dtim; 3837 3838 for (i = 0; i < 5; i++) 3839 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3840 3841 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3842 } 3843 3844 static int 3845 wpi_send_btcoex(struct wpi_softc *sc) 3846 { 3847 struct wpi_bluetooth cmd; 3848 3849 memset(&cmd, 0, sizeof cmd); 3850 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3851 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3852 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3853 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3854 __func__); 3855 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3856 } 3857 3858 static int 3859 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3860 { 3861 int error; 3862 3863 if (async) 3864 WPI_RXON_LOCK_ASSERT(sc); 3865 3866 if (assoc && wpi_check_bss_filter(sc) != 0) { 3867 struct wpi_assoc rxon_assoc; 3868 3869 rxon_assoc.flags = sc->rxon.flags; 3870 rxon_assoc.filter = sc->rxon.filter; 3871 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3872 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3873 rxon_assoc.reserved = 0; 3874 3875 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3876 sizeof (struct wpi_assoc), async); 3877 if (error != 0) { 3878 device_printf(sc->sc_dev, 3879 "RXON_ASSOC command failed, error %d\n", error); 3880 return error; 3881 } 3882 } else { 3883 if (async) { 3884 WPI_NT_LOCK(sc); 3885 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3886 sizeof (struct wpi_rxon), async); 3887 if (error == 0) 3888 wpi_clear_node_table(sc); 3889 WPI_NT_UNLOCK(sc); 3890 } else { 3891 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3892 sizeof (struct wpi_rxon), async); 3893 if (error == 0) 3894 wpi_clear_node_table(sc); 3895 } 3896 3897 if (error != 0) { 3898 device_printf(sc->sc_dev, 3899 "RXON command failed, error %d\n", error); 3900 return error; 3901 } 3902 3903 /* Add broadcast node. 
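 * A full RXON clears the firmware node table, so the broadcast entry
 * must be installed again.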
*/ 3904 error = wpi_add_broadcast_node(sc, async); 3905 if (error != 0) { 3906 device_printf(sc->sc_dev, 3907 "could not add broadcast node, error %d\n", error); 3908 return error; 3909 } 3910 } 3911 3912 /* Configuration has changed, set Tx power accordingly. */ 3913 if ((error = wpi_set_txpower(sc, async)) != 0) { 3914 device_printf(sc->sc_dev, 3915 "%s: could not set TX power, error %d\n", __func__, error); 3916 return error; 3917 } 3918 3919 return 0; 3920 } 3921 3922 /** 3923 * Configure the card to listen to a particular channel, this transisions the 3924 * card in to being able to receive frames from remote devices. 3925 */ 3926 static int 3927 wpi_config(struct wpi_softc *sc) 3928 { 3929 struct ieee80211com *ic = &sc->sc_ic; 3930 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3931 struct ieee80211_channel *c = ic->ic_curchan; 3932 int error; 3933 3934 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3935 3936 /* Set power saving level to CAM during initialization. */ 3937 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3938 device_printf(sc->sc_dev, 3939 "%s: could not set power saving level\n", __func__); 3940 return error; 3941 } 3942 3943 /* Configure bluetooth coexistence. */ 3944 if ((error = wpi_send_btcoex(sc)) != 0) { 3945 device_printf(sc->sc_dev, 3946 "could not configure bluetooth coexistence\n"); 3947 return error; 3948 } 3949 3950 /* Configure adapter. */ 3951 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3952 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3953 3954 /* Set default channel. */ 3955 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3956 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3957 if (IEEE80211_IS_CHAN_2GHZ(c)) 3958 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3959 3960 sc->rxon.filter = WPI_FILTER_MULTICAST; 3961 switch (ic->ic_opmode) { 3962 case IEEE80211_M_STA: 3963 sc->rxon.mode = WPI_MODE_STA; 3964 break; 3965 case IEEE80211_M_IBSS: 3966 sc->rxon.mode = WPI_MODE_IBSS; 3967 sc->rxon.filter |= WPI_FILTER_BEACON; 3968 break; 3969 case IEEE80211_M_HOSTAP: 3970 /* XXX workaround for beaconing */ 3971 sc->rxon.mode = WPI_MODE_IBSS; 3972 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3973 break; 3974 case IEEE80211_M_AHDEMO: 3975 sc->rxon.mode = WPI_MODE_HOSTAP; 3976 break; 3977 case IEEE80211_M_MONITOR: 3978 sc->rxon.mode = WPI_MODE_MONITOR; 3979 break; 3980 default: 3981 device_printf(sc->sc_dev, "unknown opmode %d\n", 3982 ic->ic_opmode); 3983 return EINVAL; 3984 } 3985 sc->rxon.filter = htole32(sc->rxon.filter); 3986 wpi_set_promisc(sc); 3987 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3988 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3989 3990 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3991 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3992 __func__); 3993 return error; 3994 } 3995 3996 /* Setup rate scalling. */ 3997 if ((error = wpi_mrr_setup(sc)) != 0) { 3998 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3999 error); 4000 return error; 4001 } 4002 4003 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4004 4005 return 0; 4006 } 4007 4008 static uint16_t 4009 wpi_get_active_dwell_time(struct wpi_softc *sc, 4010 struct ieee80211_channel *c, uint8_t n_probes) 4011 { 4012 /* No channel? Default to 2GHz settings. */ 4013 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 4014 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 4015 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 4016 } 4017 4018 /* 5GHz dwell time. 
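 * Same formula as the 2GHz case above, with the 5GHz base and factor:
 * dwell = base + factor * (n_probes + 1).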
*/ 4019 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4020 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4021 } 4022 4023 /* 4024 * Limit the total dwell time. 4025 * 4026 * Returns the dwell time in milliseconds. 4027 */ 4028 static uint16_t 4029 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4030 { 4031 struct ieee80211com *ic = &sc->sc_ic; 4032 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4033 uint16_t bintval = 0; 4034 4035 /* bintval is in TU (1.024mS) */ 4036 if (vap != NULL) 4037 bintval = vap->iv_bss->ni_intval; 4038 4039 /* 4040 * If it's non-zero, we should calculate the minimum of 4041 * it and the DWELL_BASE. 4042 * 4043 * XXX Yes, the math should take into account that bintval 4044 * is 1.024mS, not 1mS.. 4045 */ 4046 if (bintval > 0) { 4047 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4048 bintval); 4049 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4050 } 4051 4052 /* No association context? Default. */ 4053 return dwell_time; 4054 } 4055 4056 static uint16_t 4057 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4058 { 4059 uint16_t passive; 4060 4061 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4062 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4063 else 4064 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4065 4066 /* Clamp to the beacon interval if we're associated. */ 4067 return (wpi_limit_dwell(sc, passive)); 4068 } 4069 4070 static uint32_t 4071 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4072 { 4073 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4074 uint32_t nbeacons = time / bintval; 4075 4076 if (mod > WPI_PAUSE_MAX_TIME) 4077 mod = WPI_PAUSE_MAX_TIME; 4078 4079 return WPI_PAUSE_SCAN(nbeacons, mod); 4080 } 4081 4082 /* 4083 * Send a scan request to the firmware. 4084 */ 4085 static int 4086 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4087 { 4088 struct ieee80211com *ic = &sc->sc_ic; 4089 struct ieee80211_scan_state *ss = ic->ic_scan; 4090 struct ieee80211vap *vap = ss->ss_vap; 4091 struct wpi_scan_hdr *hdr; 4092 struct wpi_cmd_data *tx; 4093 struct wpi_scan_essid *essids; 4094 struct wpi_scan_chan *chan; 4095 struct ieee80211_frame *wh; 4096 struct ieee80211_rateset *rs; 4097 uint16_t bintval, buflen, dwell_active, dwell_passive; 4098 uint8_t *buf, *frm, i, nssid; 4099 int bgscan, error; 4100 4101 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4102 4103 /* 4104 * We are absolutely not allowed to send a scan command when another 4105 * scan command is pending. 4106 */ 4107 if (callout_pending(&sc->scan_timeout)) { 4108 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4109 __func__); 4110 error = EAGAIN; 4111 goto fail; 4112 } 4113 4114 bgscan = wpi_check_bss_filter(sc); 4115 bintval = vap->iv_bss->ni_intval; 4116 if (bgscan != 0 && 4117 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4118 error = EOPNOTSUPP; 4119 goto fail; 4120 } 4121 4122 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4123 if (buf == NULL) { 4124 device_printf(sc->sc_dev, 4125 "%s: could not allocate buffer for scan command\n", 4126 __func__); 4127 error = ENOMEM; 4128 goto fail; 4129 } 4130 hdr = (struct wpi_scan_hdr *)buf; 4131 4132 /* 4133 * Move to the next channel if no packets are received within 10 msecs 4134 * after sending the probe request. 
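 * quiet_time below is that 10 ms window; quiet_threshold is presumably the minimum number of received frames that counts as activity on the channel.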
4135 */ 4136 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4137 hdr->quiet_threshold = htole16(1); 4138 4139 if (bgscan != 0) { 4140 /* 4141 * Max needs to be greater than active and passive and quiet! 4142 * It's also in microseconds! 4143 */ 4144 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4145 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4146 bintval)); 4147 } 4148 4149 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4150 4151 tx = (struct wpi_cmd_data *)(hdr + 1); 4152 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4153 tx->id = WPI_ID_BROADCAST; 4154 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4155 4156 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4157 /* Send probe requests at 6Mbps. */ 4158 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4159 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4160 } else { 4161 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4162 /* Send probe requests at 1Mbps. */ 4163 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4164 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4165 } 4166 4167 essids = (struct wpi_scan_essid *)(tx + 1); 4168 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4169 for (i = 0; i < nssid; i++) { 4170 essids[i].id = IEEE80211_ELEMID_SSID; 4171 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4172 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4173 #ifdef WPI_DEBUG 4174 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4175 printf("Scanning Essid: "); 4176 ieee80211_print_essid(essids[i].data, essids[i].len); 4177 printf("\n"); 4178 } 4179 #endif 4180 } 4181 4182 /* 4183 * Build a probe request frame. Most of the following code is a 4184 * copy & paste of what is done in net80211. 4185 */ 4186 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4187 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4188 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4189 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4190 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4191 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4192 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4193 4194 frm = (uint8_t *)(wh + 1); 4195 frm = ieee80211_add_ssid(frm, NULL, 0); 4196 frm = ieee80211_add_rates(frm, rs); 4197 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4198 frm = ieee80211_add_xrates(frm, rs); 4199 4200 /* Set length of probe request. */ 4201 tx->len = htole16(frm - (uint8_t *)wh); 4202 4203 /* 4204 * Construct information about the channel that we 4205 * want to scan. The firmware expects this to be directly 4206 * after the scan probe request 4207 */ 4208 chan = (struct wpi_scan_chan *)frm; 4209 chan->chan = ieee80211_chan2ieee(ic, c); 4210 chan->flags = 0; 4211 if (nssid) { 4212 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4213 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4214 } else 4215 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4216 4217 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4218 chan->flags |= WPI_CHAN_ACTIVE; 4219 4220 /* 4221 * Calculate the active/passive dwell times. 4222 */ 4223 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4224 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4225 4226 /* Make sure they're valid. 
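 * The active dwell must not exceed the passive dwell, so clamp it below.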
*/ 4227 if (dwell_active > dwell_passive) 4228 dwell_active = dwell_passive; 4229 4230 chan->active = htole16(dwell_active); 4231 chan->passive = htole16(dwell_passive); 4232 4233 chan->dsp_gain = 0x6e; /* Default level */ 4234 4235 if (IEEE80211_IS_CHAN_5GHZ(c)) 4236 chan->rf_gain = 0x3b; 4237 else 4238 chan->rf_gain = 0x28; 4239 4240 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4241 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4242 4243 hdr->nchan++; 4244 4245 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4246 /* XXX Force probe request transmission. */ 4247 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4248 4249 chan++; 4250 4251 /* Reduce unnecessary delay. */ 4252 chan->flags = 0; 4253 chan->passive = chan->active = hdr->quiet_time; 4254 4255 hdr->nchan++; 4256 } 4257 4258 chan++; 4259 4260 buflen = (uint8_t *)chan - buf; 4261 hdr->len = htole16(buflen); 4262 4263 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4264 hdr->nchan); 4265 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4266 free(buf, M_DEVBUF); 4267 4268 if (error != 0) 4269 goto fail; 4270 4271 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4272 4273 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4274 4275 return 0; 4276 4277 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4278 4279 return error; 4280 } 4281 4282 static int 4283 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4284 { 4285 struct ieee80211com *ic = vap->iv_ic; 4286 struct ieee80211_node *ni = vap->iv_bss; 4287 struct ieee80211_channel *c = ni->ni_chan; 4288 int error; 4289 4290 WPI_RXON_LOCK(sc); 4291 4292 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4293 4294 /* Update adapter configuration. */ 4295 sc->rxon.associd = 0; 4296 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4297 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4298 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4299 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4300 if (IEEE80211_IS_CHAN_2GHZ(c)) 4301 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4302 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4303 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4304 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4305 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4306 if (IEEE80211_IS_CHAN_A(c)) { 4307 sc->rxon.cck_mask = 0; 4308 sc->rxon.ofdm_mask = 0x15; 4309 } else if (IEEE80211_IS_CHAN_B(c)) { 4310 sc->rxon.cck_mask = 0x03; 4311 sc->rxon.ofdm_mask = 0; 4312 } else { 4313 /* Assume 802.11b/g. 
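 * (0x0f enables all four CCK rates; 0x15 should correspond to the mandatory 6/12/24 Mbit/s OFDM rates.)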
*/ 4314 sc->rxon.cck_mask = 0x0f; 4315 sc->rxon.ofdm_mask = 0x15; 4316 } 4317 4318 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4319 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4320 sc->rxon.ofdm_mask); 4321 4322 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4323 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4324 __func__); 4325 } 4326 4327 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4328 4329 WPI_RXON_UNLOCK(sc); 4330 4331 return error; 4332 } 4333 4334 static int 4335 wpi_config_beacon(struct wpi_vap *wvp) 4336 { 4337 struct ieee80211vap *vap = &wvp->wv_vap; 4338 struct ieee80211com *ic = vap->iv_ic; 4339 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4340 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4341 struct wpi_softc *sc = ic->ic_softc; 4342 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4343 struct ieee80211_tim_ie *tie; 4344 struct mbuf *m; 4345 uint8_t *ptr; 4346 int error; 4347 4348 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4349 4350 WPI_VAP_LOCK_ASSERT(wvp); 4351 4352 cmd->len = htole16(bcn->m->m_pkthdr.len); 4353 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4354 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4355 4356 /* XXX seems to be unused */ 4357 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4358 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4359 ptr = mtod(bcn->m, uint8_t *); 4360 4361 cmd->tim = htole16(bo->bo_tim - ptr); 4362 cmd->timsz = tie->tim_len; 4363 } 4364 4365 /* Necessary for recursion in ieee80211_beacon_update(). */ 4366 m = bcn->m; 4367 bcn->m = m_dup(m, M_NOWAIT); 4368 if (bcn->m == NULL) { 4369 device_printf(sc->sc_dev, 4370 "%s: could not copy beacon frame\n", __func__); 4371 error = ENOMEM; 4372 goto end; 4373 } 4374 4375 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4376 device_printf(sc->sc_dev, 4377 "%s: could not update beacon frame, error %d\n", __func__, 4378 error); 4379 m_freem(bcn->m); 4380 } 4381 4382 /* Restore mbuf.
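 * bcn->m was swapped for a duplicate that wpi_cmd2() consumed (or that was freed on error above); put the original beacon back so later calls to ieee80211_beacon_update() still have it.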
*/ 4383 end: bcn->m = m; 4384 4385 return error; 4386 } 4387 4388 static int 4389 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4390 { 4391 struct ieee80211vap *vap = ni->ni_vap; 4392 struct wpi_vap *wvp = WPI_VAP(vap); 4393 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4394 struct mbuf *m; 4395 int error; 4396 4397 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4398 4399 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4400 return EINVAL; 4401 4402 m = ieee80211_beacon_alloc(ni); 4403 if (m == NULL) { 4404 device_printf(sc->sc_dev, 4405 "%s: could not allocate beacon frame\n", __func__); 4406 return ENOMEM; 4407 } 4408 4409 WPI_VAP_LOCK(wvp); 4410 if (bcn->m != NULL) 4411 m_freem(bcn->m); 4412 4413 bcn->m = m; 4414 4415 error = wpi_config_beacon(wvp); 4416 WPI_VAP_UNLOCK(wvp); 4417 4418 return error; 4419 } 4420 4421 static void 4422 wpi_update_beacon(struct ieee80211vap *vap, int item) 4423 { 4424 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4425 struct wpi_vap *wvp = WPI_VAP(vap); 4426 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4427 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4428 struct ieee80211_node *ni = vap->iv_bss; 4429 int mcast = 0; 4430 4431 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4432 4433 WPI_VAP_LOCK(wvp); 4434 if (bcn->m == NULL) { 4435 bcn->m = ieee80211_beacon_alloc(ni); 4436 if (bcn->m == NULL) { 4437 device_printf(sc->sc_dev, 4438 "%s: could not allocate beacon frame\n", __func__); 4439 4440 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4441 __func__); 4442 4443 WPI_VAP_UNLOCK(wvp); 4444 return; 4445 } 4446 } 4447 WPI_VAP_UNLOCK(wvp); 4448 4449 if (item == IEEE80211_BEACON_TIM) 4450 mcast = 1; /* TODO */ 4451 4452 setbit(bo->bo_flags, item); 4453 ieee80211_beacon_update(ni, bcn->m, mcast); 4454 4455 WPI_VAP_LOCK(wvp); 4456 wpi_config_beacon(wvp); 4457 WPI_VAP_UNLOCK(wvp); 4458 4459 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4460 } 4461 4462 static void 4463 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4464 { 4465 struct ieee80211vap *vap = ni->ni_vap; 4466 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4467 struct wpi_node *wn = WPI_NODE(ni); 4468 int error; 4469 4470 WPI_NT_LOCK(sc); 4471 4472 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4473 4474 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4475 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4476 device_printf(sc->sc_dev, 4477 "%s: could not add IBSS node, error %d\n", 4478 __func__, error); 4479 } 4480 } 4481 WPI_NT_UNLOCK(sc); 4482 } 4483 4484 static int 4485 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4486 { 4487 struct ieee80211com *ic = vap->iv_ic; 4488 struct ieee80211_node *ni = vap->iv_bss; 4489 struct ieee80211_channel *c = ni->ni_chan; 4490 int error; 4491 4492 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4493 4494 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4495 /* Link LED blinks while monitoring. */ 4496 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4497 return 0; 4498 } 4499 4500 /* XXX kernel panic workaround */ 4501 if (c == IEEE80211_CHAN_ANYC) { 4502 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4503 __func__); 4504 return EINVAL; 4505 } 4506 4507 if ((error = wpi_set_timing(sc, ni)) != 0) { 4508 device_printf(sc->sc_dev, 4509 "%s: could not set timing, error %d\n", __func__, error); 4510 return error; 4511 } 4512 4513 /* Update adapter configuration. 
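 * Switch the RXON structure to the associated-BSS settings below: BSSID, AID, channel, slot/preamble flags and rate masks.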
*/ 4514 WPI_RXON_LOCK(sc); 4515 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4516 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4517 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4518 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4519 if (IEEE80211_IS_CHAN_2GHZ(c)) 4520 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4521 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4522 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4523 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4524 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4525 if (IEEE80211_IS_CHAN_A(c)) { 4526 sc->rxon.cck_mask = 0; 4527 sc->rxon.ofdm_mask = 0x15; 4528 } else if (IEEE80211_IS_CHAN_B(c)) { 4529 sc->rxon.cck_mask = 0x03; 4530 sc->rxon.ofdm_mask = 0; 4531 } else { 4532 /* Assume 802.11b/g. */ 4533 sc->rxon.cck_mask = 0x0f; 4534 sc->rxon.ofdm_mask = 0x15; 4535 } 4536 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4537 4538 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4539 sc->rxon.chan, sc->rxon.flags); 4540 4541 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4542 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4543 __func__); 4544 return error; 4545 } 4546 4547 /* Start periodic calibration timer. */ 4548 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4549 4550 WPI_RXON_UNLOCK(sc); 4551 4552 if (vap->iv_opmode == IEEE80211_M_IBSS || 4553 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4554 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4555 device_printf(sc->sc_dev, 4556 "%s: could not setup beacon, error %d\n", __func__, 4557 error); 4558 return error; 4559 } 4560 } 4561 4562 if (vap->iv_opmode == IEEE80211_M_STA) { 4563 /* Add BSS node. */ 4564 WPI_NT_LOCK(sc); 4565 error = wpi_add_sta_node(sc, ni); 4566 WPI_NT_UNLOCK(sc); 4567 if (error != 0) { 4568 device_printf(sc->sc_dev, 4569 "%s: could not add BSS node, error %d\n", __func__, 4570 error); 4571 return error; 4572 } 4573 } 4574 4575 /* Link LED always on while associated. */ 4576 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4577 4578 /* Enable power-saving mode if requested by user. 
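 * (Level 3 is a middle setting on the scale used by wpi_set_pslevel(), where 0 is CAM and 5 is maximum power saving.)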
*/ 4579 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4580 vap->iv_opmode != IEEE80211_M_IBSS) 4581 (void)wpi_set_pslevel(sc, 0, 3, 1); 4582 4583 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4584 4585 return 0; 4586 } 4587 4588 static int 4589 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4590 { 4591 const struct ieee80211_cipher *cip = k->wk_cipher; 4592 struct ieee80211vap *vap = ni->ni_vap; 4593 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4594 struct wpi_node *wn = WPI_NODE(ni); 4595 struct wpi_node_info node; 4596 uint16_t kflags; 4597 int error; 4598 4599 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4600 4601 if (wpi_check_node_entry(sc, wn->id) == 0) { 4602 device_printf(sc->sc_dev, "%s: node does not exist\n", 4603 __func__); 4604 return 0; 4605 } 4606 4607 switch (cip->ic_cipher) { 4608 case IEEE80211_CIPHER_AES_CCM: 4609 kflags = WPI_KFLAG_CCMP; 4610 break; 4611 4612 default: 4613 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4614 cip->ic_cipher); 4615 return 0; 4616 } 4617 4618 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4619 if (k->wk_flags & IEEE80211_KEY_GROUP) 4620 kflags |= WPI_KFLAG_MULTICAST; 4621 4622 memset(&node, 0, sizeof node); 4623 node.id = wn->id; 4624 node.control = WPI_NODE_UPDATE; 4625 node.flags = WPI_FLAG_KEY_SET; 4626 node.kflags = htole16(kflags); 4627 memcpy(node.key, k->wk_key, k->wk_keylen); 4628 again: 4629 DPRINTF(sc, WPI_DEBUG_KEY, 4630 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4631 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4632 node.id, ether_sprintf(ni->ni_macaddr)); 4633 4634 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4635 if (error != 0) { 4636 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4637 error); 4638 return !error; 4639 } 4640 4641 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4642 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4643 kflags |= WPI_KFLAG_MULTICAST; 4644 node.kflags = htole16(kflags); 4645 4646 goto again; 4647 } 4648 4649 return 1; 4650 } 4651 4652 static void 4653 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4654 { 4655 const struct ieee80211_key *k = arg; 4656 struct ieee80211vap *vap = ni->ni_vap; 4657 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4658 struct wpi_node *wn = WPI_NODE(ni); 4659 int error; 4660 4661 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4662 return; 4663 4664 WPI_NT_LOCK(sc); 4665 error = wpi_load_key(ni, k); 4666 WPI_NT_UNLOCK(sc); 4667 4668 if (error == 0) { 4669 device_printf(sc->sc_dev, "%s: error while setting key\n", 4670 __func__); 4671 } 4672 } 4673 4674 static int 4675 wpi_set_global_keys(struct ieee80211_node *ni) 4676 { 4677 struct ieee80211vap *vap = ni->ni_vap; 4678 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4679 int error = 1; 4680 4681 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4682 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4683 error = wpi_load_key(ni, wk); 4684 4685 return !error; 4686 } 4687 4688 static int 4689 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4690 { 4691 struct ieee80211vap *vap = ni->ni_vap; 4692 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4693 struct wpi_node *wn = WPI_NODE(ni); 4694 struct wpi_node_info node; 4695 uint16_t kflags; 4696 int error; 4697 4698 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4699 4700 if (wpi_check_node_entry(sc, wn->id) == 0) { 4701 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4702 return 1; /* 
Nothing to do. */ 4703 } 4704 4705 kflags = WPI_KFLAG_KID(k->wk_keyix); 4706 if (k->wk_flags & IEEE80211_KEY_GROUP) 4707 kflags |= WPI_KFLAG_MULTICAST; 4708 4709 memset(&node, 0, sizeof node); 4710 node.id = wn->id; 4711 node.control = WPI_NODE_UPDATE; 4712 node.flags = WPI_FLAG_KEY_SET; 4713 node.kflags = htole16(kflags); 4714 again: 4715 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4716 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4717 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4718 4719 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4720 if (error != 0) { 4721 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4722 error); 4723 return !error; 4724 } 4725 4726 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4727 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4728 kflags |= WPI_KFLAG_MULTICAST; 4729 node.kflags = htole16(kflags); 4730 4731 goto again; 4732 } 4733 4734 return 1; 4735 } 4736 4737 static void 4738 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4739 { 4740 const struct ieee80211_key *k = arg; 4741 struct ieee80211vap *vap = ni->ni_vap; 4742 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4743 struct wpi_node *wn = WPI_NODE(ni); 4744 int error; 4745 4746 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4747 return; 4748 4749 WPI_NT_LOCK(sc); 4750 error = wpi_del_key(ni, k); 4751 WPI_NT_UNLOCK(sc); 4752 4753 if (error == 0) { 4754 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4755 __func__); 4756 } 4757 } 4758 4759 static int 4760 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4761 int set) 4762 { 4763 struct ieee80211com *ic = vap->iv_ic; 4764 struct wpi_softc *sc = ic->ic_softc; 4765 struct wpi_vap *wvp = WPI_VAP(vap); 4766 struct ieee80211_node *ni; 4767 int error, ni_ref = 0; 4768 4769 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4770 4771 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4772 /* Not for us. */ 4773 return 1; 4774 } 4775 4776 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4777 /* XMIT keys are handled in wpi_tx_data(). */ 4778 return 1; 4779 } 4780 4781 /* Handle group keys. */ 4782 if (&vap->iv_nw_keys[0] <= k && 4783 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4784 WPI_NT_LOCK(sc); 4785 if (set) 4786 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4787 else 4788 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4789 WPI_NT_UNLOCK(sc); 4790 4791 if (vap->iv_state == IEEE80211_S_RUN) { 4792 ieee80211_iterate_nodes(&ic->ic_sta, 4793 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4794 __DECONST(void *, k)); 4795 } 4796 4797 return 1; 4798 } 4799 4800 switch (vap->iv_opmode) { 4801 case IEEE80211_M_STA: 4802 ni = vap->iv_bss; 4803 break; 4804 4805 case IEEE80211_M_IBSS: 4806 case IEEE80211_M_AHDEMO: 4807 case IEEE80211_M_HOSTAP: 4808 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4809 if (ni == NULL) 4810 return 0; /* should not happen */ 4811 4812 ni_ref = 1; 4813 break; 4814 4815 default: 4816 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4817 vap->iv_opmode); 4818 return 0; 4819 } 4820 4821 WPI_NT_LOCK(sc); 4822 if (set) 4823 error = wpi_load_key(ni, k); 4824 else 4825 error = wpi_del_key(ni, k); 4826 WPI_NT_UNLOCK(sc); 4827 4828 if (ni_ref) 4829 ieee80211_node_decref(ni); 4830 4831 return error; 4832 } 4833 4834 static int 4835 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4836 { 4837 return wpi_process_key(vap, k, 1); 4838 } 4839 4840 static int 4841 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4842 { 4843 return wpi_process_key(vap, k, 0); 4844 } 4845 4846 /* 4847 * This function is called after the runtime firmware notifies us of its 4848 * readiness (called in a process context). 4849 */ 4850 static int 4851 wpi_post_alive(struct wpi_softc *sc) 4852 { 4853 int ntries, error; 4854 4855 /* Check (again) that the radio is not disabled. */ 4856 if ((error = wpi_nic_lock(sc)) != 0) 4857 return error; 4858 4859 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4860 4861 /* NB: Runtime firmware must be up and running. */ 4862 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4863 device_printf(sc->sc_dev, 4864 "RF switch: radio disabled (%s)\n", __func__); 4865 wpi_nic_unlock(sc); 4866 return EPERM; /* :-) */ 4867 } 4868 wpi_nic_unlock(sc); 4869 4870 /* Wait for thermal sensor to calibrate. */ 4871 for (ntries = 0; ntries < 1000; ntries++) { 4872 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4873 break; 4874 DELAY(10); 4875 } 4876 4877 if (ntries == 1000) { 4878 device_printf(sc->sc_dev, 4879 "timeout waiting for thermal sensor calibration\n"); 4880 return ETIMEDOUT; 4881 } 4882 4883 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4884 return 0; 4885 } 4886 4887 /* 4888 * The firmware boot code is small and is intended to be copied directly into 4889 * the NIC internal memory (no DMA transfer). 4890 */ 4891 static int 4892 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) 4893 { 4894 int error, ntries; 4895 4896 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4897 4898 size /= sizeof (uint32_t); 4899 4900 if ((error = wpi_nic_lock(sc)) != 0) 4901 return error; 4902 4903 /* Copy microcode image into NIC memory. */ 4904 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4905 (const uint32_t *)ucode, size); 4906 4907 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4908 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4909 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4910 4911 /* Start boot load now. */ 4912 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4913 4914 /* Wait for transfer to complete. */ 4915 for (ntries = 0; ntries < 1000; ntries++) { 4916 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4917 DPRINTF(sc, WPI_DEBUG_HW, 4918 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4919 WPI_FH_TX_STATUS_IDLE(6), 4920 status & WPI_FH_TX_STATUS_IDLE(6)); 4921 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4922 DPRINTF(sc, WPI_DEBUG_HW, 4923 "Status Match! 
- ntries = %d\n", ntries); 4924 break; 4925 } 4926 DELAY(10); 4927 } 4928 if (ntries == 1000) { 4929 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4930 __func__); 4931 wpi_nic_unlock(sc); 4932 return ETIMEDOUT; 4933 } 4934 4935 /* Enable boot after power up. */ 4936 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4937 4938 wpi_nic_unlock(sc); 4939 return 0; 4940 } 4941 4942 static int 4943 wpi_load_firmware(struct wpi_softc *sc) 4944 { 4945 struct wpi_fw_info *fw = &sc->fw; 4946 struct wpi_dma_info *dma = &sc->fw_dma; 4947 int error; 4948 4949 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4950 4951 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4952 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4953 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4954 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4955 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4956 4957 /* Tell adapter where to find initialization sections. */ 4958 if ((error = wpi_nic_lock(sc)) != 0) 4959 return error; 4960 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4961 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4962 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4963 dma->paddr + WPI_FW_DATA_MAXSZ); 4964 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4965 wpi_nic_unlock(sc); 4966 4967 /* Load firmware boot code. */ 4968 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4969 if (error != 0) { 4970 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4971 __func__); 4972 return error; 4973 } 4974 4975 /* Now press "execute". */ 4976 WPI_WRITE(sc, WPI_RESET, 0); 4977 4978 /* Wait at most one second for first alive notification. */ 4979 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4980 device_printf(sc->sc_dev, 4981 "%s: timeout waiting for adapter to initialize, error %d\n", 4982 __func__, error); 4983 return error; 4984 } 4985 4986 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4987 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4988 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4989 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4990 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4991 4992 /* Tell adapter where to find runtime sections. 
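 * Same BSM registers as for the init image above; the WPI_FW_UPDATED bit in the text size presumably tells the bootstrap state machine that a runtime image is now in place.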
*/ 4993 if ((error = wpi_nic_lock(sc)) != 0) 4994 return error; 4995 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4996 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4997 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4998 dma->paddr + WPI_FW_DATA_MAXSZ); 4999 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 5000 WPI_FW_UPDATED | fw->main.textsz); 5001 wpi_nic_unlock(sc); 5002 5003 return 0; 5004 } 5005 5006 static int 5007 wpi_read_firmware(struct wpi_softc *sc) 5008 { 5009 const struct firmware *fp; 5010 struct wpi_fw_info *fw = &sc->fw; 5011 const struct wpi_firmware_hdr *hdr; 5012 int error; 5013 5014 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5015 5016 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5017 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5018 5019 WPI_UNLOCK(sc); 5020 fp = firmware_get(WPI_FW_NAME); 5021 WPI_LOCK(sc); 5022 5023 if (fp == NULL) { 5024 device_printf(sc->sc_dev, 5025 "could not load firmware image '%s'\n", WPI_FW_NAME); 5026 return EINVAL; 5027 } 5028 5029 sc->fw_fp = fp; 5030 5031 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5032 device_printf(sc->sc_dev, 5033 "firmware file too short: %zu bytes\n", fp->datasize); 5034 error = EINVAL; 5035 goto fail; 5036 } 5037 5038 fw->size = fp->datasize; 5039 fw->data = (const uint8_t *)fp->data; 5040 5041 /* Extract firmware header information. */ 5042 hdr = (const struct wpi_firmware_hdr *)fw->data; 5043 5044 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5045 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5046 5047 fw->main.textsz = le32toh(hdr->rtextsz); 5048 fw->main.datasz = le32toh(hdr->rdatasz); 5049 fw->init.textsz = le32toh(hdr->itextsz); 5050 fw->init.datasz = le32toh(hdr->idatasz); 5051 fw->boot.textsz = le32toh(hdr->btextsz); 5052 fw->boot.datasz = 0; 5053 5054 /* Sanity-check firmware header. */ 5055 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5056 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5057 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5058 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5059 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5060 (fw->boot.textsz & 3) != 0) { 5061 device_printf(sc->sc_dev, "invalid firmware header\n"); 5062 error = EINVAL; 5063 goto fail; 5064 } 5065 5066 /* Check that all firmware sections fit. */ 5067 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5068 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5069 device_printf(sc->sc_dev, 5070 "firmware file too short: %zu bytes\n", fw->size); 5071 error = EINVAL; 5072 goto fail; 5073 } 5074 5075 /* Get pointers to firmware sections. 
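 * The sections follow the header back to back, in the order shown in the layout diagram above.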
*/ 5076 fw->main.text = (const uint8_t *)(hdr + 1); 5077 fw->main.data = fw->main.text + fw->main.textsz; 5078 fw->init.text = fw->main.data + fw->main.datasz; 5079 fw->init.data = fw->init.text + fw->init.textsz; 5080 fw->boot.text = fw->init.data + fw->init.datasz; 5081 5082 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5083 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5084 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5085 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5086 fw->main.textsz, fw->main.datasz, 5087 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5088 5089 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5090 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5091 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5092 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5093 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5094 5095 return 0; 5096 5097 fail: wpi_unload_firmware(sc); 5098 return error; 5099 } 5100 5101 /** 5102 * Free the referenced firmware image 5103 */ 5104 static void 5105 wpi_unload_firmware(struct wpi_softc *sc) 5106 { 5107 if (sc->fw_fp != NULL) { 5108 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5109 sc->fw_fp = NULL; 5110 } 5111 } 5112 5113 static int 5114 wpi_clock_wait(struct wpi_softc *sc) 5115 { 5116 int ntries; 5117 5118 /* Set "initialization complete" bit. */ 5119 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5120 5121 /* Wait for clock stabilization. */ 5122 for (ntries = 0; ntries < 2500; ntries++) { 5123 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5124 return 0; 5125 DELAY(100); 5126 } 5127 device_printf(sc->sc_dev, 5128 "%s: timeout waiting for clock stabilization\n", __func__); 5129 5130 return ETIMEDOUT; 5131 } 5132 5133 static int 5134 wpi_apm_init(struct wpi_softc *sc) 5135 { 5136 uint32_t reg; 5137 int error; 5138 5139 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5140 5141 /* Disable L0s exit timer (NMI bug workaround). */ 5142 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5143 /* Don't wait for ICH L0s (ICH bug workaround). */ 5144 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5145 5146 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5147 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5148 5149 /* Retrieve PCIe Active State Power Management (ASPM). */ 5150 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 5151 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5152 if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ 5153 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5154 else 5155 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5156 5157 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5158 5159 /* Wait for clock stabilization before accessing prph. */ 5160 if ((error = wpi_clock_wait(sc)) != 0) 5161 return error; 5162 5163 if ((error = wpi_nic_lock(sc)) != 0) 5164 return error; 5165 /* Cleanup. */ 5166 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5167 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5168 5169 /* Enable DMA and BSM (Bootstrap State Machine). */ 5170 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5171 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5172 DELAY(20); 5173 /* Disable L1-Active. 
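 * (Related to the PCIe ASPM handling earlier in this function.)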
*/ 5174 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5175 wpi_nic_unlock(sc); 5176 5177 return 0; 5178 } 5179 5180 static void 5181 wpi_apm_stop_master(struct wpi_softc *sc) 5182 { 5183 int ntries; 5184 5185 /* Stop busmaster DMA activity. */ 5186 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5187 5188 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5189 WPI_GP_CNTRL_MAC_PS) 5190 return; /* Already asleep. */ 5191 5192 for (ntries = 0; ntries < 100; ntries++) { 5193 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5194 return; 5195 DELAY(10); 5196 } 5197 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5198 __func__); 5199 } 5200 5201 static void 5202 wpi_apm_stop(struct wpi_softc *sc) 5203 { 5204 wpi_apm_stop_master(sc); 5205 5206 /* Reset the entire device. */ 5207 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5208 DELAY(10); 5209 /* Clear "initialization complete" bit. */ 5210 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5211 } 5212 5213 static void 5214 wpi_nic_config(struct wpi_softc *sc) 5215 { 5216 uint32_t rev; 5217 5218 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5219 5220 /* voodoo from the Linux "driver".. */ 5221 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5222 if ((rev & 0xc0) == 0x40) 5223 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5224 else if (!(rev & 0x80)) 5225 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5226 5227 if (sc->cap == 0x80) 5228 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5229 5230 if ((sc->rev & 0xf0) == 0xd0) 5231 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5232 else 5233 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5234 5235 if (sc->type > 1) 5236 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5237 } 5238 5239 static int 5240 wpi_hw_init(struct wpi_softc *sc) 5241 { 5242 uint8_t chnl; 5243 int ntries, error; 5244 5245 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5246 5247 /* Clear pending interrupts. */ 5248 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5249 5250 if ((error = wpi_apm_init(sc)) != 0) { 5251 device_printf(sc->sc_dev, 5252 "%s: could not power ON adapter, error %d\n", __func__, 5253 error); 5254 return error; 5255 } 5256 5257 /* Select VMAIN power source. */ 5258 if ((error = wpi_nic_lock(sc)) != 0) 5259 return error; 5260 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5261 wpi_nic_unlock(sc); 5262 /* Spin until VMAIN gets selected. */ 5263 for (ntries = 0; ntries < 5000; ntries++) { 5264 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5265 break; 5266 DELAY(10); 5267 } 5268 if (ntries == 5000) { 5269 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5270 return ETIMEDOUT; 5271 } 5272 5273 /* Perform adapter initialization. */ 5274 wpi_nic_config(sc); 5275 5276 /* Initialize RX ring. */ 5277 if ((error = wpi_nic_lock(sc)) != 0) 5278 return error; 5279 /* Set physical address of RX ring. */ 5280 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5281 /* Set physical address of RX read pointer. */ 5282 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5283 offsetof(struct wpi_shared, next)); 5284 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5285 /* Enable RX. 
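 * The config word below enables the RX DMA engine, sets the ring size via WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) and routes the RX interrupt to the host.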
*/ 5286 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5287 WPI_FH_RX_CONFIG_DMA_ENA | 5288 WPI_FH_RX_CONFIG_RDRBD_ENA | 5289 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5290 WPI_FH_RX_CONFIG_MAXFRAG | 5291 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5292 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5293 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5294 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5295 wpi_nic_unlock(sc); 5296 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5297 5298 /* Initialize TX rings. */ 5299 if ((error = wpi_nic_lock(sc)) != 0) 5300 return error; 5301 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5302 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5303 /* Enable all 6 TX rings. */ 5304 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5305 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5306 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5307 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5308 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5309 /* Set physical address of TX rings. */ 5310 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5311 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5312 5313 /* Enable all DMA channels. */ 5314 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5315 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5316 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5317 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5318 } 5319 wpi_nic_unlock(sc); 5320 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5321 5322 /* Clear "radio off" and "commands blocked" bits. */ 5323 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5324 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5325 5326 /* Clear pending interrupts. */ 5327 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5328 /* Enable interrupts. */ 5329 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5330 5331 /* _Really_ make sure "radio off" bit is cleared! */ 5332 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5333 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5334 5335 if ((error = wpi_load_firmware(sc)) != 0) { 5336 device_printf(sc->sc_dev, 5337 "%s: could not load firmware, error %d\n", __func__, 5338 error); 5339 return error; 5340 } 5341 /* Wait at most one second for firmware alive notification. */ 5342 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5343 device_printf(sc->sc_dev, 5344 "%s: timeout waiting for adapter to initialize, error %d\n", 5345 __func__, error); 5346 return error; 5347 } 5348 5349 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5350 5351 /* Do post-firmware initialization. */ 5352 return wpi_post_alive(sc); 5353 } 5354 5355 static void 5356 wpi_hw_stop(struct wpi_softc *sc) 5357 { 5358 uint8_t chnl, qid; 5359 int ntries; 5360 5361 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5362 5363 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5364 wpi_nic_lock(sc); 5365 5366 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5367 5368 /* Disable interrupts. */ 5369 WPI_WRITE(sc, WPI_INT_MASK, 0); 5370 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5371 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5372 5373 /* Make sure we no longer hold the NIC lock. */ 5374 wpi_nic_unlock(sc); 5375 5376 if (wpi_nic_lock(sc) == 0) { 5377 /* Stop TX scheduler. */ 5378 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5379 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5380 5381 /* Stop all DMA channels. 
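 * Clear each channel's TX config and poll its idle bit for up to 200 * 10 us = 2 ms.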
*/ 5382 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5383 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5384 for (ntries = 0; ntries < 200; ntries++) { 5385 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5386 WPI_FH_TX_STATUS_IDLE(chnl)) 5387 break; 5388 DELAY(10); 5389 } 5390 } 5391 wpi_nic_unlock(sc); 5392 } 5393 5394 /* Stop RX ring. */ 5395 wpi_reset_rx_ring(sc); 5396 5397 /* Reset all TX rings. */ 5398 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 5399 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5400 5401 if (wpi_nic_lock(sc) == 0) { 5402 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5403 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5404 wpi_nic_unlock(sc); 5405 } 5406 DELAY(5); 5407 /* Power OFF adapter. */ 5408 wpi_apm_stop(sc); 5409 } 5410 5411 static void 5412 wpi_radio_on(void *arg0, int pending) 5413 { 5414 struct wpi_softc *sc = arg0; 5415 struct ieee80211com *ic = &sc->sc_ic; 5416 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5417 5418 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5419 5420 WPI_LOCK(sc); 5421 callout_stop(&sc->watchdog_rfkill); 5422 WPI_UNLOCK(sc); 5423 5424 if (vap != NULL) 5425 ieee80211_init(vap); 5426 } 5427 5428 static void 5429 wpi_radio_off(void *arg0, int pending) 5430 { 5431 struct wpi_softc *sc = arg0; 5432 struct ieee80211com *ic = &sc->sc_ic; 5433 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5434 5435 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5436 5437 ieee80211_notify_radio(ic, 0); 5438 wpi_stop(sc); 5439 if (vap != NULL) 5440 ieee80211_stop(vap); 5441 5442 WPI_LOCK(sc); 5443 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5444 WPI_UNLOCK(sc); 5445 } 5446 5447 static int 5448 wpi_init(struct wpi_softc *sc) 5449 { 5450 int error = 0; 5451 5452 WPI_LOCK(sc); 5453 5454 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5455 5456 if (sc->sc_running != 0) 5457 goto end; 5458 5459 /* Check that the radio is not disabled by hardware switch. */ 5460 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5461 device_printf(sc->sc_dev, 5462 "RF switch: radio disabled (%s)\n", __func__); 5463 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5464 sc); 5465 error = EINPROGRESS; 5466 goto end; 5467 } 5468 5469 /* Read firmware images from the filesystem. */ 5470 if ((error = wpi_read_firmware(sc)) != 0) { 5471 device_printf(sc->sc_dev, 5472 "%s: could not read firmware, error %d\n", __func__, 5473 error); 5474 goto end; 5475 } 5476 5477 sc->sc_running = 1; 5478 5479 /* Initialize hardware and upload firmware. */ 5480 error = wpi_hw_init(sc); 5481 wpi_unload_firmware(sc); 5482 if (error != 0) { 5483 device_printf(sc->sc_dev, 5484 "%s: could not initialize hardware, error %d\n", __func__, 5485 error); 5486 goto fail; 5487 } 5488 5489 /* Configure adapter now that it is ready. 
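 * wpi_config() sends the initial power, bluetooth-coexistence, RXON and MRR commands set up earlier in this file.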
*/ 5490 if ((error = wpi_config(sc)) != 0) { 5491 device_printf(sc->sc_dev, 5492 "%s: could not configure device, error %d\n", __func__, 5493 error); 5494 goto fail; 5495 } 5496 5497 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5498 5499 WPI_UNLOCK(sc); 5500 5501 return 0; 5502 5503 fail: wpi_stop_locked(sc); 5504 5505 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5506 WPI_UNLOCK(sc); 5507 5508 return error; 5509 } 5510 5511 static void 5512 wpi_stop_locked(struct wpi_softc *sc) 5513 { 5514 5515 WPI_LOCK_ASSERT(sc); 5516 5517 if (sc->sc_running == 0) 5518 return; 5519 5520 WPI_TX_LOCK(sc); 5521 WPI_TXQ_LOCK(sc); 5522 sc->sc_running = 0; 5523 WPI_TXQ_UNLOCK(sc); 5524 WPI_TX_UNLOCK(sc); 5525 5526 WPI_TXQ_STATE_LOCK(sc); 5527 callout_stop(&sc->tx_timeout); 5528 WPI_TXQ_STATE_UNLOCK(sc); 5529 5530 WPI_RXON_LOCK(sc); 5531 callout_stop(&sc->scan_timeout); 5532 callout_stop(&sc->calib_to); 5533 WPI_RXON_UNLOCK(sc); 5534 5535 /* Power OFF hardware. */ 5536 wpi_hw_stop(sc); 5537 } 5538 5539 static void 5540 wpi_stop(struct wpi_softc *sc) 5541 { 5542 WPI_LOCK(sc); 5543 wpi_stop_locked(sc); 5544 WPI_UNLOCK(sc); 5545 } 5546 5547 /* 5548 * Callback from net80211 to start a scan. 5549 */ 5550 static void 5551 wpi_scan_start(struct ieee80211com *ic) 5552 { 5553 struct wpi_softc *sc = ic->ic_softc; 5554 5555 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5556 } 5557 5558 /* 5559 * Callback from net80211 to terminate a scan. 5560 */ 5561 static void 5562 wpi_scan_end(struct ieee80211com *ic) 5563 { 5564 struct wpi_softc *sc = ic->ic_softc; 5565 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5566 5567 if (vap->iv_state == IEEE80211_S_RUN) 5568 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5569 } 5570 5571 /** 5572 * Called by the net80211 framework to indicate to the driver 5573 * that the channel should be changed. 5574 */ 5575 static void 5576 wpi_set_channel(struct ieee80211com *ic) 5577 { 5578 const struct ieee80211_channel *c = ic->ic_curchan; 5579 struct wpi_softc *sc = ic->ic_softc; 5580 int error; 5581 5582 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5583 5584 WPI_LOCK(sc); 5585 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5586 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5587 WPI_UNLOCK(sc); 5588 WPI_TX_LOCK(sc); 5589 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5590 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5591 WPI_TX_UNLOCK(sc); 5592 5593 /* 5594 * Only need to set the channel in Monitor mode. AP scanning and auth 5595 * are already taken care of by their respective firmware commands. 5596 */ 5597 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5598 WPI_RXON_LOCK(sc); 5599 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5600 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5601 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5602 WPI_RXON_24GHZ); 5603 } else { 5604 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5605 WPI_RXON_24GHZ); 5606 } 5607 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5608 device_printf(sc->sc_dev, 5609 "%s: error %d setting channel\n", __func__, 5610 error); 5611 WPI_RXON_UNLOCK(sc); 5612 } 5613 } 5614 5615 /** 5616 * Called by net80211 to indicate that we need to scan the current 5617 * channel. The channel has previously been set via the wpi_set_channel 5618 * callback.
5619 */ 5620 static void 5621 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5622 { 5623 struct ieee80211vap *vap = ss->ss_vap; 5624 struct ieee80211com *ic = vap->iv_ic; 5625 struct wpi_softc *sc = ic->ic_softc; 5626 int error; 5627 5628 WPI_RXON_LOCK(sc); 5629 error = wpi_scan(sc, ic->ic_curchan); 5630 WPI_RXON_UNLOCK(sc); 5631 if (error != 0) 5632 ieee80211_cancel_scan(vap); 5633 } 5634 5635 /** 5636 * Called by the net80211 framework to indicate that the minimum dwell time 5637 * has been met and the scan may be terminated. 5638 * We don't actually terminate the scan as the firmware will notify 5639 * us when it's finished and we have no way to interrupt it. 5640 */ 5641 static void 5642 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5643 { 5644 /* NB: don't try to abort scan; wait for firmware to finish */ 5645 } 5646