1 /*- 2 * Copyright (c) 2006,2007 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Benjamin Close <Benjamin.Close@clearchain.com> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/cdefs.h> 20 __FBSDID("$FreeBSD$"); 21 22 /* 23 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters. 24 * 25 * The 3945ABG network adapter doesn't use traditional hardware as 26 * many other adapters do. Instead, at run time the adapter is put into a known 27 * state and told to load boot firmware. The boot firmware loads an init and a 28 * main binary firmware image into SRAM on the card via DMA. 29 * Once the firmware is loaded, the driver and the firmware then 30 * communicate by way of circular DMA rings. 31 * 32 * There are 6 memory rings: 1 command ring, 1 rx data ring and 4 tx data rings. 33 * The 4 tx data rings allow for QoS prioritization. 34 * 35 * The rx data ring consists of 32 DMA buffers. Two registers are used to 36 * indicate how far into the ring the driver and the firmware have got. The 37 * driver sets the initial read index (reg1) and the initial write index (reg2); 38 * the firmware updates the read index (reg1) on rx of a packet and fires an 39 * interrupt. The driver then processes the buffers starting at reg1, indicating 40 * to the firmware which buffers have been consumed by updating reg2, and 41 * allocating new memory for each processed buffer as it goes. 42 * 43 * A similar scheme is used for the tx rings. The difference is that the firmware 44 * stops processing buffers once the queue is full, and resumes only after 45 * confirmation of a successful transmission (tx_done) has occurred. 46 * 47 * The command ring operates in the same manner as the tx queues. 48 * 49 * All communication directly with the card (i.e. the eeprom) is classed as Stage 1 50 * communication. 51 * 52 * All communication with the card via the firmware is classed as Stage 2. 53 * The firmware consists of 2 parts: a bootstrap firmware and a runtime 54 * firmware. Both are loaded 55 * from host memory to the card via DMA and then told to execute. From this point 56 * on the majority of communication between the driver and the card goes 57 * via the firmware.
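 *
 * To illustrate the rx ring handshake described above, here is a simplified,
 * purely illustrative sketch of the driver side (this is not the actual
 * driver code; the helper names below are placeholders, only the
 * WPI_RX_RING_COUNT constant is real):
 *
 *	hw = read_index_from_firmware();	// reg1, advanced by the firmware
 *	while (sw != hw) {
 *		deliver_to_net80211(ring[sw]);	// hand the received frame up
 *		ring[sw] = alloc_new_rx_buffer();	// restock the processed slot
 *		sw = (sw + 1) % WPI_RX_RING_COUNT;
 *	}
 *	update_write_index(sw);			// reg2, tells the firmware what was consumed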
58 */ 59 60 #include "opt_wlan.h" 61 #include "opt_wpi.h" 62 63 #include <sys/param.h> 64 #include <sys/sysctl.h> 65 #include <sys/sockio.h> 66 #include <sys/mbuf.h> 67 #include <sys/kernel.h> 68 #include <sys/socket.h> 69 #include <sys/systm.h> 70 #include <sys/malloc.h> 71 #include <sys/queue.h> 72 #include <sys/taskqueue.h> 73 #include <sys/module.h> 74 #include <sys/bus.h> 75 #include <sys/endian.h> 76 #include <sys/linker.h> 77 #include <sys/firmware.h> 78 79 #include <machine/bus.h> 80 #include <machine/resource.h> 81 #include <sys/rman.h> 82 83 #include <dev/pci/pcireg.h> 84 #include <dev/pci/pcivar.h> 85 86 #include <net/bpf.h> 87 #include <net/if.h> 88 #include <net/if_var.h> 89 #include <net/if_arp.h> 90 #include <net/ethernet.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/if_types.h> 94 95 #include <netinet/in.h> 96 #include <netinet/in_systm.h> 97 #include <netinet/in_var.h> 98 #include <netinet/if_ether.h> 99 #include <netinet/ip.h> 100 101 #include <net80211/ieee80211_var.h> 102 #include <net80211/ieee80211_radiotap.h> 103 #include <net80211/ieee80211_regdomain.h> 104 #include <net80211/ieee80211_ratectl.h> 105 106 #include <dev/wpi/if_wpireg.h> 107 #include <dev/wpi/if_wpivar.h> 108 #include <dev/wpi/if_wpi_debug.h> 109 110 struct wpi_ident { 111 uint16_t vendor; 112 uint16_t device; 113 uint16_t subdevice; 114 const char *name; 115 }; 116 117 static const struct wpi_ident wpi_ident_table[] = { 118 /* The below entries support ABG regardless of the subid */ 119 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 120 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 /* The below entries only support BG */ 122 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 123 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0, 0, 0, NULL } 127 }; 128 129 static int wpi_probe(device_t); 130 static int wpi_attach(device_t); 131 static void wpi_radiotap_attach(struct wpi_softc *); 132 static void wpi_sysctlattach(struct wpi_softc *); 133 static void wpi_init_beacon(struct wpi_vap *); 134 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 135 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 136 const uint8_t [IEEE80211_ADDR_LEN], 137 const uint8_t [IEEE80211_ADDR_LEN]); 138 static void wpi_vap_delete(struct ieee80211vap *); 139 static int wpi_detach(device_t); 140 static int wpi_shutdown(device_t); 141 static int wpi_suspend(device_t); 142 static int wpi_resume(device_t); 143 static int wpi_nic_lock(struct wpi_softc *); 144 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 145 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 146 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 147 void **, bus_size_t, bus_size_t); 148 static void wpi_dma_contig_free(struct wpi_dma_info *); 149 static int wpi_alloc_shared(struct wpi_softc *); 150 static void wpi_free_shared(struct wpi_softc *); 151 static int wpi_alloc_fwmem(struct wpi_softc *); 152 static void wpi_free_fwmem(struct wpi_softc *); 153 static int wpi_alloc_rx_ring(struct wpi_softc *); 154 static void wpi_update_rx_ring(struct wpi_softc *); 155 static void wpi_update_rx_ring_ps(struct wpi_softc *); 156 static void wpi_reset_rx_ring(struct wpi_softc *); 157 static void wpi_free_rx_ring(struct wpi_softc *); 158 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 159 int); 160 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_update_tx_ring_ps(struct wpi_softc *, 162 struct wpi_tx_ring *); 163 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 164 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static int wpi_read_eeprom(struct wpi_softc *, 166 uint8_t macaddr[IEEE80211_ADDR_LEN]); 167 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 168 static void wpi_read_eeprom_band(struct wpi_softc *, int); 169 static int wpi_read_eeprom_channels(struct wpi_softc *, int); 170 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 171 struct ieee80211_channel *); 172 static int wpi_setregdomain(struct ieee80211com *, 173 struct ieee80211_regdomain *, int, 174 struct ieee80211_channel[]); 175 static int wpi_read_eeprom_group(struct wpi_softc *, int); 176 static int wpi_add_node_entry_adhoc(struct wpi_softc *); 177 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 178 const uint8_t mac[IEEE80211_ADDR_LEN]); 179 static void wpi_node_free(struct ieee80211_node *); 180 static void wpi_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 181 const struct ieee80211_rx_stats *, 182 int, int); 183 static void wpi_restore_node(void *, struct ieee80211_node *); 184 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 185 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 186 static void wpi_calib_timeout(void *); 187 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 188 struct wpi_rx_data *); 189 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 190 struct wpi_rx_data *); 191 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 192 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 193 static void wpi_notif_intr(struct wpi_softc *); 194 static void wpi_wakeup_intr(struct wpi_softc *); 195 #ifdef WPI_DEBUG 196 static void wpi_debug_registers(struct wpi_softc *); 197 #endif 198 static void wpi_fatal_intr(struct wpi_softc *); 199 static void wpi_intr(void *); 200 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 201 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 202 struct ieee80211_node *); 203 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 204 struct ieee80211_node *, 205 const struct ieee80211_bpf_params *); 206 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 207 const struct ieee80211_bpf_params *); 208 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 209 static void wpi_start(void *, int); 210 static void wpi_watchdog_rfkill(void *); 211 static void wpi_scan_timeout(void *); 212 static void wpi_tx_timeout(void *); 213 static void wpi_parent(struct ieee80211com *); 214 static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 215 static int wpi_mrr_setup(struct wpi_softc *); 216 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 217 static int wpi_add_broadcast_node(struct wpi_softc *, int); 218 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 219 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 220 static int wpi_updateedca(struct ieee80211com *); 221 static void wpi_set_promisc(struct wpi_softc *); 222 static void wpi_update_promisc(struct ieee80211com *); 223 static void wpi_update_mcast(struct ieee80211com *); 224 static void 
wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 225 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 226 static void wpi_power_calibration(struct wpi_softc *); 227 static int wpi_set_txpower(struct wpi_softc *, int); 228 static int wpi_get_power_index(struct wpi_softc *, 229 struct wpi_power_group *, uint8_t, int, int); 230 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 231 static int wpi_send_btcoex(struct wpi_softc *); 232 static int wpi_send_rxon(struct wpi_softc *, int, int); 233 static int wpi_config(struct wpi_softc *); 234 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 235 struct ieee80211_channel *, uint8_t); 236 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 237 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 238 struct ieee80211_channel *); 239 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 240 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 241 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 242 static int wpi_config_beacon(struct wpi_vap *); 243 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 244 static void wpi_update_beacon(struct ieee80211vap *, int); 245 static void wpi_newassoc(struct ieee80211_node *, int); 246 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 247 static int wpi_load_key(struct ieee80211_node *, 248 const struct ieee80211_key *); 249 static void wpi_load_key_cb(void *, struct ieee80211_node *); 250 static int wpi_set_global_keys(struct ieee80211_node *); 251 static int wpi_del_key(struct ieee80211_node *, 252 const struct ieee80211_key *); 253 static void wpi_del_key_cb(void *, struct ieee80211_node *); 254 static int wpi_process_key(struct ieee80211vap *, 255 const struct ieee80211_key *, int); 256 static int wpi_key_set(struct ieee80211vap *, 257 const struct ieee80211_key *); 258 static int wpi_key_delete(struct ieee80211vap *, 259 const struct ieee80211_key *); 260 static int wpi_post_alive(struct wpi_softc *); 261 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 262 static int wpi_load_firmware(struct wpi_softc *); 263 static int wpi_read_firmware(struct wpi_softc *); 264 static void wpi_unload_firmware(struct wpi_softc *); 265 static int wpi_clock_wait(struct wpi_softc *); 266 static int wpi_apm_init(struct wpi_softc *); 267 static void wpi_apm_stop_master(struct wpi_softc *); 268 static void wpi_apm_stop(struct wpi_softc *); 269 static void wpi_nic_config(struct wpi_softc *); 270 static int wpi_hw_init(struct wpi_softc *); 271 static void wpi_hw_stop(struct wpi_softc *); 272 static void wpi_radio_on(void *, int); 273 static void wpi_radio_off(void *, int); 274 static int wpi_init(struct wpi_softc *); 275 static void wpi_stop_locked(struct wpi_softc *); 276 static void wpi_stop(struct wpi_softc *); 277 static void wpi_scan_start(struct ieee80211com *); 278 static void wpi_scan_end(struct ieee80211com *); 279 static void wpi_set_channel(struct ieee80211com *); 280 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 281 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 282 static void wpi_hw_reset(void *, int); 283 284 static device_method_t wpi_methods[] = { 285 /* Device interface */ 286 DEVMETHOD(device_probe, wpi_probe), 287 DEVMETHOD(device_attach, wpi_attach), 288 DEVMETHOD(device_detach, wpi_detach), 289 DEVMETHOD(device_shutdown, wpi_shutdown), 290 DEVMETHOD(device_suspend, wpi_suspend), 291 
DEVMETHOD(device_resume, wpi_resume), 292 293 DEVMETHOD_END 294 }; 295 296 static driver_t wpi_driver = { 297 "wpi", 298 wpi_methods, 299 sizeof (struct wpi_softc) 300 }; 301 static devclass_t wpi_devclass; 302 303 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 304 305 MODULE_VERSION(wpi, 1); 306 307 MODULE_DEPEND(wpi, pci, 1, 1, 1); 308 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 309 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 310 311 static int 312 wpi_probe(device_t dev) 313 { 314 const struct wpi_ident *ident; 315 316 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 317 if (pci_get_vendor(dev) == ident->vendor && 318 pci_get_device(dev) == ident->device) { 319 device_set_desc(dev, ident->name); 320 return (BUS_PROBE_DEFAULT); 321 } 322 } 323 return ENXIO; 324 } 325 326 static int 327 wpi_attach(device_t dev) 328 { 329 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 330 struct ieee80211com *ic; 331 int i, error, rid; 332 #ifdef WPI_DEBUG 333 int supportsa = 1; 334 const struct wpi_ident *ident; 335 #endif 336 337 sc->sc_dev = dev; 338 339 #ifdef WPI_DEBUG 340 error = resource_int_value(device_get_name(sc->sc_dev), 341 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 342 if (error != 0) 343 sc->sc_debug = 0; 344 #else 345 sc->sc_debug = 0; 346 #endif 347 348 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 349 350 /* 351 * Get the offset of the PCI Express Capability Structure in PCI 352 * Configuration Space. 353 */ 354 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 355 if (error != 0) { 356 device_printf(dev, "PCIe capability structure not found!\n"); 357 return error; 358 } 359 360 /* 361 * Some cards only support 802.11b/g, not 802.11a; check to see if 362 * this is one such card. A 0x0 in the subdevice table indicates 363 * the entire subdevice range is to be ignored. 364 */ 365 #ifdef WPI_DEBUG 366 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 367 if (ident->subdevice && 368 pci_get_subdevice(dev) == ident->subdevice) { 369 supportsa = 0; 370 break; 371 } 372 } 373 #endif 374 375 /* Clear device-specific "PCI retry timeout" register (41h). */ 376 pci_write_config(dev, 0x41, 0, 1); 377 378 /* Enable bus-mastering. */ 379 pci_enable_busmaster(dev); 380 381 rid = PCIR_BAR(0); 382 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 383 RF_ACTIVE); 384 if (sc->mem == NULL) { 385 device_printf(dev, "can't map mem space\n"); 386 return ENOMEM; 387 } 388 sc->sc_st = rman_get_bustag(sc->mem); 389 sc->sc_sh = rman_get_bushandle(sc->mem); 390 391 i = 1; 392 rid = 0; 393 if (pci_alloc_msi(dev, &i) == 0) 394 rid = 1; 395 /* Install interrupt handler. */ 396 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 397 (rid != 0 ? 0 : RF_SHAREABLE)); 398 if (sc->irq == NULL) { 399 device_printf(dev, "can't map interrupt\n"); 400 error = ENOMEM; 401 goto fail; 402 } 403 404 WPI_LOCK_INIT(sc); 405 WPI_TX_LOCK_INIT(sc); 406 WPI_RXON_LOCK_INIT(sc); 407 WPI_NT_LOCK_INIT(sc); 408 WPI_TXQ_LOCK_INIT(sc); 409 WPI_TXQ_STATE_LOCK_INIT(sc); 410 411 /* Allocate DMA memory for firmware transfers. */ 412 if ((error = wpi_alloc_fwmem(sc)) != 0) { 413 device_printf(dev, 414 "could not allocate memory for firmware, error %d\n", 415 error); 416 goto fail; 417 } 418 419 /* Allocate shared page. */ 420 if ((error = wpi_alloc_shared(sc)) != 0) { 421 device_printf(dev, "could not allocate shared page\n"); 422 goto fail; 423 } 424 425 /* Allocate TX rings - 4 for QoS purposes, 1 for commands.
*/ 426 for (i = 0; i < WPI_NTXQUEUES; i++) { 427 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 428 device_printf(dev, 429 "could not allocate TX ring %d, error %d\n", i, 430 error); 431 goto fail; 432 } 433 } 434 435 /* Allocate RX ring. */ 436 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 437 device_printf(dev, "could not allocate RX ring, error %d\n", 438 error); 439 goto fail; 440 } 441 442 /* Clear pending interrupts. */ 443 WPI_WRITE(sc, WPI_INT, 0xffffffff); 444 445 ic = &sc->sc_ic; 446 ic->ic_softc = sc; 447 ic->ic_name = device_get_nameunit(dev); 448 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 449 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 450 451 /* Set device capabilities. */ 452 ic->ic_caps = 453 IEEE80211_C_STA /* station mode supported */ 454 | IEEE80211_C_IBSS /* IBSS mode supported */ 455 | IEEE80211_C_HOSTAP /* Host access point mode */ 456 | IEEE80211_C_MONITOR /* monitor mode supported */ 457 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 458 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 459 | IEEE80211_C_TXPMGT /* tx power management */ 460 | IEEE80211_C_SHSLOT /* short slot time supported */ 461 | IEEE80211_C_WPA /* 802.11i */ 462 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 463 | IEEE80211_C_WME /* 802.11e */ 464 | IEEE80211_C_PMGT /* Station-side power mgmt */ 465 ; 466 467 ic->ic_cryptocaps = 468 IEEE80211_CRYPTO_AES_CCM; 469 470 /* 471 * Read in the eeprom and also setup the channels for 472 * net80211. We don't set the rates as net80211 does this for us 473 */ 474 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 475 device_printf(dev, "could not read EEPROM, error %d\n", 476 error); 477 goto fail; 478 } 479 480 #ifdef WPI_DEBUG 481 if (bootverbose) { 482 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 483 sc->domain); 484 device_printf(sc->sc_dev, "Hardware Type: %c\n", 485 sc->type > 1 ? 'B': '?'); 486 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 487 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 488 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 489 supportsa ? "does" : "does not"); 490 491 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 492 check what sc->rev really represents - benjsc 20070615 */ 493 } 494 #endif 495 496 ieee80211_ifattach(ic); 497 ic->ic_vap_create = wpi_vap_create; 498 ic->ic_vap_delete = wpi_vap_delete; 499 ic->ic_parent = wpi_parent; 500 ic->ic_raw_xmit = wpi_raw_xmit; 501 ic->ic_transmit = wpi_transmit; 502 ic->ic_node_alloc = wpi_node_alloc; 503 sc->sc_node_free = ic->ic_node_free; 504 ic->ic_node_free = wpi_node_free; 505 ic->ic_wme.wme_update = wpi_updateedca; 506 ic->ic_update_promisc = wpi_update_promisc; 507 ic->ic_update_mcast = wpi_update_mcast; 508 ic->ic_newassoc = wpi_newassoc; 509 ic->ic_scan_start = wpi_scan_start; 510 ic->ic_scan_end = wpi_scan_end; 511 ic->ic_set_channel = wpi_set_channel; 512 ic->ic_scan_curchan = wpi_scan_curchan; 513 ic->ic_scan_mindwell = wpi_scan_mindwell; 514 ic->ic_setregdomain = wpi_setregdomain; 515 516 sc->sc_update_rx_ring = wpi_update_rx_ring; 517 sc->sc_update_tx_ring = wpi_update_tx_ring; 518 519 wpi_radiotap_attach(sc); 520 521 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 522 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 523 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 524 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 525 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 526 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 527 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 528 TASK_INIT(&sc->sc_start_task, 0, wpi_start, sc); 529 530 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 531 taskqueue_thread_enqueue, &sc->sc_tq); 532 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 533 if (error != 0) { 534 device_printf(dev, "can't start threads, error %d\n", error); 535 goto fail; 536 } 537 538 wpi_sysctlattach(sc); 539 540 /* 541 * Hook our interrupt after all initialization is complete. 542 */ 543 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 544 NULL, wpi_intr, sc, &sc->sc_ih); 545 if (error != 0) { 546 device_printf(dev, "can't establish interrupt, error %d\n", 547 error); 548 goto fail; 549 } 550 551 if (bootverbose) 552 ieee80211_announce(ic); 553 554 #ifdef WPI_DEBUG 555 if (sc->sc_debug & WPI_DEBUG_HW) 556 ieee80211_announce_channels(ic); 557 #endif 558 559 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 560 return 0; 561 562 fail: wpi_detach(dev); 563 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 564 return error; 565 } 566 567 /* 568 * Attach the interface to 802.11 radiotap. 
569 */ 570 static void 571 wpi_radiotap_attach(struct wpi_softc *sc) 572 { 573 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 574 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 575 576 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 577 ieee80211_radiotap_attach(&sc->sc_ic, 578 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 579 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 581 } 582 583 static void 584 wpi_sysctlattach(struct wpi_softc *sc) 585 { 586 #ifdef WPI_DEBUG 587 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 588 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 589 590 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 591 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 592 "control debugging printfs"); 593 #endif 594 } 595 596 static void 597 wpi_init_beacon(struct wpi_vap *wvp) 598 { 599 struct wpi_buf *bcn = &wvp->wv_bcbuf; 600 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 601 602 cmd->id = WPI_ID_BROADCAST; 603 cmd->ofdm_mask = 0xff; 604 cmd->cck_mask = 0x0f; 605 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 606 607 /* 608 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 609 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 610 */ 611 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 612 613 bcn->code = WPI_CMD_SET_BEACON; 614 bcn->ac = WPI_CMD_QUEUE_NUM; 615 bcn->size = sizeof(struct wpi_cmd_beacon); 616 } 617 618 static struct ieee80211vap * 619 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 620 enum ieee80211_opmode opmode, int flags, 621 const uint8_t bssid[IEEE80211_ADDR_LEN], 622 const uint8_t mac[IEEE80211_ADDR_LEN]) 623 { 624 struct wpi_vap *wvp; 625 struct ieee80211vap *vap; 626 627 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 628 return NULL; 629 630 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 631 vap = &wvp->wv_vap; 632 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 633 634 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 635 WPI_VAP_LOCK_INIT(wvp); 636 wpi_init_beacon(wvp); 637 } 638 639 /* Override with driver methods. */ 640 vap->iv_key_set = wpi_key_set; 641 vap->iv_key_delete = wpi_key_delete; 642 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 643 vap->iv_recv_mgmt = wpi_recv_mgmt; 644 wvp->wv_newstate = vap->iv_newstate; 645 vap->iv_newstate = wpi_newstate; 646 vap->iv_update_beacon = wpi_update_beacon; 647 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 648 649 ieee80211_ratectl_init(vap); 650 /* Complete setup. 
*/ 651 ieee80211_vap_attach(vap, ieee80211_media_change, 652 ieee80211_media_status, mac); 653 ic->ic_opmode = opmode; 654 return vap; 655 } 656 657 static void 658 wpi_vap_delete(struct ieee80211vap *vap) 659 { 660 struct wpi_vap *wvp = WPI_VAP(vap); 661 struct wpi_buf *bcn = &wvp->wv_bcbuf; 662 enum ieee80211_opmode opmode = vap->iv_opmode; 663 664 ieee80211_ratectl_deinit(vap); 665 ieee80211_vap_detach(vap); 666 667 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 668 if (bcn->m != NULL) 669 m_freem(bcn->m); 670 671 WPI_VAP_LOCK_DESTROY(wvp); 672 } 673 674 free(wvp, M_80211_VAP); 675 } 676 677 static int 678 wpi_detach(device_t dev) 679 { 680 struct wpi_softc *sc = device_get_softc(dev); 681 struct ieee80211com *ic = &sc->sc_ic; 682 int qid; 683 684 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 685 686 if (ic->ic_vap_create == wpi_vap_create) { 687 ieee80211_draintask(ic, &sc->sc_radioon_task); 688 ieee80211_draintask(ic, &sc->sc_start_task); 689 690 wpi_stop(sc); 691 692 if (sc->sc_tq != NULL) { 693 taskqueue_drain_all(sc->sc_tq); 694 taskqueue_free(sc->sc_tq); 695 } 696 697 callout_drain(&sc->watchdog_rfkill); 698 callout_drain(&sc->tx_timeout); 699 callout_drain(&sc->scan_timeout); 700 callout_drain(&sc->calib_to); 701 ieee80211_ifdetach(ic); 702 } 703 704 /* Uninstall interrupt handler. */ 705 if (sc->irq != NULL) { 706 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 707 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 708 sc->irq); 709 pci_release_msi(dev); 710 } 711 712 if (sc->txq[0].data_dmat) { 713 /* Free DMA resources. */ 714 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 715 wpi_free_tx_ring(sc, &sc->txq[qid]); 716 717 wpi_free_rx_ring(sc); 718 wpi_free_shared(sc); 719 } 720 721 if (sc->fw_dma.tag) 722 wpi_free_fwmem(sc); 723 724 if (sc->mem != NULL) 725 bus_release_resource(dev, SYS_RES_MEMORY, 726 rman_get_rid(sc->mem), sc->mem); 727 728 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 729 WPI_TXQ_STATE_LOCK_DESTROY(sc); 730 WPI_TXQ_LOCK_DESTROY(sc); 731 WPI_NT_LOCK_DESTROY(sc); 732 WPI_RXON_LOCK_DESTROY(sc); 733 WPI_TX_LOCK_DESTROY(sc); 734 WPI_LOCK_DESTROY(sc); 735 return 0; 736 } 737 738 static int 739 wpi_shutdown(device_t dev) 740 { 741 struct wpi_softc *sc = device_get_softc(dev); 742 743 wpi_stop(sc); 744 return 0; 745 } 746 747 static int 748 wpi_suspend(device_t dev) 749 { 750 struct wpi_softc *sc = device_get_softc(dev); 751 struct ieee80211com *ic = &sc->sc_ic; 752 753 ieee80211_suspend_all(ic); 754 return 0; 755 } 756 757 static int 758 wpi_resume(device_t dev) 759 { 760 struct wpi_softc *sc = device_get_softc(dev); 761 struct ieee80211com *ic = &sc->sc_ic; 762 763 /* Clear device-specific "PCI retry timeout" register (41h). */ 764 pci_write_config(dev, 0x41, 0, 1); 765 766 ieee80211_resume_all(ic); 767 return 0; 768 } 769 770 /* 771 * Grab exclusive access to NIC memory. 772 */ 773 static int 774 wpi_nic_lock(struct wpi_softc *sc) 775 { 776 int ntries; 777 778 /* Request exclusive access to NIC. */ 779 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 780 781 /* Spin until we actually get the lock. */ 782 for (ntries = 0; ntries < 1000; ntries++) { 783 if ((WPI_READ(sc, WPI_GP_CNTRL) & 784 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 785 WPI_GP_CNTRL_MAC_ACCESS_ENA) 786 return 0; 787 DELAY(10); 788 } 789 790 device_printf(sc->sc_dev, "could not lock memory\n"); 791 792 return ETIMEDOUT; 793 } 794 795 /* 796 * Release lock on NIC memory. 
797 */ 798 static __inline void 799 wpi_nic_unlock(struct wpi_softc *sc) 800 { 801 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 802 } 803 804 static __inline uint32_t 805 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 806 { 807 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 808 WPI_BARRIER_READ_WRITE(sc); 809 return WPI_READ(sc, WPI_PRPH_RDATA); 810 } 811 812 static __inline void 813 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 814 { 815 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 816 WPI_BARRIER_WRITE(sc); 817 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 818 } 819 820 static __inline void 821 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 822 { 823 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 824 } 825 826 static __inline void 827 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 828 { 829 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 830 } 831 832 static __inline void 833 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 834 const uint32_t *data, int count) 835 { 836 for (; count > 0; count--, data++, addr += 4) 837 wpi_prph_write(sc, addr, *data); 838 } 839 840 static __inline uint32_t 841 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 842 { 843 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 844 WPI_BARRIER_READ_WRITE(sc); 845 return WPI_READ(sc, WPI_MEM_RDATA); 846 } 847 848 static __inline void 849 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 850 int count) 851 { 852 for (; count > 0; count--, addr += 4) 853 *data++ = wpi_mem_read(sc, addr); 854 } 855 856 static int 857 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 858 { 859 uint8_t *out = data; 860 uint32_t val; 861 int error, ntries; 862 863 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 864 865 if ((error = wpi_nic_lock(sc)) != 0) 866 return error; 867 868 for (; count > 0; count -= 2, addr++) { 869 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 870 for (ntries = 0; ntries < 10; ntries++) { 871 val = WPI_READ(sc, WPI_EEPROM); 872 if (val & WPI_EEPROM_READ_VALID) 873 break; 874 DELAY(5); 875 } 876 if (ntries == 10) { 877 device_printf(sc->sc_dev, 878 "timeout reading ROM at 0x%x\n", addr); 879 return ETIMEDOUT; 880 } 881 *out++= val >> 16; 882 if (count > 1) 883 *out ++= val >> 24; 884 } 885 886 wpi_nic_unlock(sc); 887 888 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 889 890 return 0; 891 } 892 893 static void 894 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 895 { 896 if (error != 0) 897 return; 898 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 899 *(bus_addr_t *)arg = segs[0].ds_addr; 900 } 901 902 /* 903 * Allocates a contiguous block of dma memory of the requested size and 904 * alignment. 
905 */ 906 static int 907 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 908 void **kvap, bus_size_t size, bus_size_t alignment) 909 { 910 int error; 911 912 dma->tag = NULL; 913 dma->size = size; 914 915 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 916 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 917 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 918 if (error != 0) 919 goto fail; 920 921 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 922 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 923 if (error != 0) 924 goto fail; 925 926 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 927 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 928 if (error != 0) 929 goto fail; 930 931 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 932 933 if (kvap != NULL) 934 *kvap = dma->vaddr; 935 936 return 0; 937 938 fail: wpi_dma_contig_free(dma); 939 return error; 940 } 941 942 static void 943 wpi_dma_contig_free(struct wpi_dma_info *dma) 944 { 945 if (dma->vaddr != NULL) { 946 bus_dmamap_sync(dma->tag, dma->map, 947 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 948 bus_dmamap_unload(dma->tag, dma->map); 949 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 950 dma->vaddr = NULL; 951 } 952 if (dma->tag != NULL) { 953 bus_dma_tag_destroy(dma->tag); 954 dma->tag = NULL; 955 } 956 } 957 958 /* 959 * Allocate a shared page between host and NIC. 960 */ 961 static int 962 wpi_alloc_shared(struct wpi_softc *sc) 963 { 964 /* Shared buffer must be aligned on a 4KB boundary. */ 965 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 966 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 967 } 968 969 static void 970 wpi_free_shared(struct wpi_softc *sc) 971 { 972 wpi_dma_contig_free(&sc->shared_dma); 973 } 974 975 /* 976 * Allocate DMA-safe memory for firmware transfer. 977 */ 978 static int 979 wpi_alloc_fwmem(struct wpi_softc *sc) 980 { 981 /* Must be aligned on a 16-byte boundary. */ 982 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 983 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 984 } 985 986 static void 987 wpi_free_fwmem(struct wpi_softc *sc) 988 { 989 wpi_dma_contig_free(&sc->fw_dma); 990 } 991 992 static int 993 wpi_alloc_rx_ring(struct wpi_softc *sc) 994 { 995 struct wpi_rx_ring *ring = &sc->rxq; 996 bus_size_t size; 997 int i, error; 998 999 ring->cur = 0; 1000 ring->update = 0; 1001 1002 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1003 1004 /* Allocate RX descriptors (16KB aligned.) */ 1005 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1006 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1007 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1008 if (error != 0) { 1009 device_printf(sc->sc_dev, 1010 "%s: could not allocate RX ring DMA memory, error %d\n", 1011 __func__, error); 1012 goto fail; 1013 } 1014 1015 /* Create RX buffer DMA tag. */ 1016 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1017 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1018 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1019 &ring->data_dmat); 1020 if (error != 0) { 1021 device_printf(sc->sc_dev, 1022 "%s: could not create RX buf DMA tag, error %d\n", 1023 __func__, error); 1024 goto fail; 1025 } 1026 1027 /* 1028 * Allocate and map RX buffers. 
1029 */ 1030 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1031 struct wpi_rx_data *data = &ring->data[i]; 1032 bus_addr_t paddr; 1033 1034 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1035 if (error != 0) { 1036 device_printf(sc->sc_dev, 1037 "%s: could not create RX buf DMA map, error %d\n", 1038 __func__, error); 1039 goto fail; 1040 } 1041 1042 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1043 if (data->m == NULL) { 1044 device_printf(sc->sc_dev, 1045 "%s: could not allocate RX mbuf\n", __func__); 1046 error = ENOBUFS; 1047 goto fail; 1048 } 1049 1050 error = bus_dmamap_load(ring->data_dmat, data->map, 1051 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1052 &paddr, BUS_DMA_NOWAIT); 1053 if (error != 0 && error != EFBIG) { 1054 device_printf(sc->sc_dev, 1055 "%s: can't map mbuf (error %d)\n", __func__, 1056 error); 1057 goto fail; 1058 } 1059 1060 /* Set physical address of RX buffer. */ 1061 ring->desc[i] = htole32(paddr); 1062 } 1063 1064 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1065 BUS_DMASYNC_PREWRITE); 1066 1067 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1068 1069 return 0; 1070 1071 fail: wpi_free_rx_ring(sc); 1072 1073 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1074 1075 return error; 1076 } 1077 1078 static void 1079 wpi_update_rx_ring(struct wpi_softc *sc) 1080 { 1081 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1082 } 1083 1084 static void 1085 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1086 { 1087 struct wpi_rx_ring *ring = &sc->rxq; 1088 1089 if (ring->update != 0) { 1090 /* Wait for INT_WAKEUP event. */ 1091 return; 1092 } 1093 1094 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1095 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1096 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1097 __func__); 1098 ring->update = 1; 1099 } else { 1100 wpi_update_rx_ring(sc); 1101 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1102 } 1103 } 1104 1105 static void 1106 wpi_reset_rx_ring(struct wpi_softc *sc) 1107 { 1108 struct wpi_rx_ring *ring = &sc->rxq; 1109 int ntries; 1110 1111 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1112 1113 if (wpi_nic_lock(sc) == 0) { 1114 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1115 for (ntries = 0; ntries < 1000; ntries++) { 1116 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1117 WPI_FH_RX_STATUS_IDLE) 1118 break; 1119 DELAY(10); 1120 } 1121 wpi_nic_unlock(sc); 1122 } 1123 1124 ring->cur = 0; 1125 ring->update = 0; 1126 } 1127 1128 static void 1129 wpi_free_rx_ring(struct wpi_softc *sc) 1130 { 1131 struct wpi_rx_ring *ring = &sc->rxq; 1132 int i; 1133 1134 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1135 1136 wpi_dma_contig_free(&ring->desc_dma); 1137 1138 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1139 struct wpi_rx_data *data = &ring->data[i]; 1140 1141 if (data->m != NULL) { 1142 bus_dmamap_sync(ring->data_dmat, data->map, 1143 BUS_DMASYNC_POSTREAD); 1144 bus_dmamap_unload(ring->data_dmat, data->map); 1145 m_freem(data->m); 1146 data->m = NULL; 1147 } 1148 if (data->map != NULL) 1149 bus_dmamap_destroy(ring->data_dmat, data->map); 1150 } 1151 if (ring->data_dmat != NULL) { 1152 bus_dma_tag_destroy(ring->data_dmat); 1153 ring->data_dmat = NULL; 1154 } 1155 } 1156 1157 static int 1158 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1159 { 1160 bus_addr_t paddr; 1161 bus_size_t size; 1162 int i, error; 1163 1164 ring->qid = qid; 1165 ring->queued = 0; 1166 ring->cur = 0; 1167 ring->update = 0; 
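/* Software queue of frames waiting to be handed to this TX ring. */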
1168 mbufq_init(&ring->snd, ifqmaxlen); 1169 1170 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1171 1172 /* Allocate TX descriptors (16KB aligned.) */ 1173 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1174 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1175 size, WPI_RING_DMA_ALIGN); 1176 if (error != 0) { 1177 device_printf(sc->sc_dev, 1178 "%s: could not allocate TX ring DMA memory, error %d\n", 1179 __func__, error); 1180 goto fail; 1181 } 1182 1183 /* Update shared area with ring physical address. */ 1184 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1185 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1186 BUS_DMASYNC_PREWRITE); 1187 1188 /* 1189 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1190 * to allocate commands space for other rings. 1191 * XXX Do we really need to allocate descriptors for other rings? 1192 */ 1193 if (qid > WPI_CMD_QUEUE_NUM) { 1194 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1195 return 0; 1196 } 1197 1198 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1199 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1200 size, 4); 1201 if (error != 0) { 1202 device_printf(sc->sc_dev, 1203 "%s: could not allocate TX cmd DMA memory, error %d\n", 1204 __func__, error); 1205 goto fail; 1206 } 1207 1208 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1209 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1210 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1211 &ring->data_dmat); 1212 if (error != 0) { 1213 device_printf(sc->sc_dev, 1214 "%s: could not create TX buf DMA tag, error %d\n", 1215 __func__, error); 1216 goto fail; 1217 } 1218 1219 paddr = ring->cmd_dma.paddr; 1220 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1221 struct wpi_tx_data *data = &ring->data[i]; 1222 1223 data->cmd_paddr = paddr; 1224 paddr += sizeof (struct wpi_tx_cmd); 1225 1226 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1227 if (error != 0) { 1228 device_printf(sc->sc_dev, 1229 "%s: could not create TX buf DMA map, error %d\n", 1230 __func__, error); 1231 goto fail; 1232 } 1233 } 1234 1235 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1236 1237 return 0; 1238 1239 fail: wpi_free_tx_ring(sc, ring); 1240 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1241 return error; 1242 } 1243 1244 static void 1245 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1246 { 1247 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1248 } 1249 1250 static void 1251 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1252 { 1253 1254 if (ring->update != 0) { 1255 /* Wait for INT_WAKEUP event. 
*/ 1256 return; 1257 } 1258 1259 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1260 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1261 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1262 __func__, ring->qid); 1263 ring->update = 1; 1264 } else { 1265 wpi_update_tx_ring(sc, ring); 1266 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1267 } 1268 } 1269 1270 static void 1271 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1272 { 1273 int i; 1274 1275 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1276 1277 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1278 struct wpi_tx_data *data = &ring->data[i]; 1279 1280 if (data->m != NULL) { 1281 bus_dmamap_sync(ring->data_dmat, data->map, 1282 BUS_DMASYNC_POSTWRITE); 1283 bus_dmamap_unload(ring->data_dmat, data->map); 1284 m_freem(data->m); 1285 data->m = NULL; 1286 } 1287 if (data->ni != NULL) { 1288 ieee80211_free_node(data->ni); 1289 data->ni = NULL; 1290 } 1291 } 1292 /* Clear TX descriptors. */ 1293 memset(ring->desc, 0, ring->desc_dma.size); 1294 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1295 BUS_DMASYNC_PREWRITE); 1296 mbufq_drain(&ring->snd); 1297 sc->qfullmsk &= ~(1 << ring->qid); 1298 ring->queued = 0; 1299 ring->cur = 0; 1300 ring->update = 0; 1301 } 1302 1303 static void 1304 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1305 { 1306 int i; 1307 1308 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1309 1310 wpi_dma_contig_free(&ring->desc_dma); 1311 wpi_dma_contig_free(&ring->cmd_dma); 1312 1313 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1314 struct wpi_tx_data *data = &ring->data[i]; 1315 1316 if (data->m != NULL) { 1317 bus_dmamap_sync(ring->data_dmat, data->map, 1318 BUS_DMASYNC_POSTWRITE); 1319 bus_dmamap_unload(ring->data_dmat, data->map); 1320 m_freem(data->m); 1321 } 1322 if (data->map != NULL) 1323 bus_dmamap_destroy(ring->data_dmat, data->map); 1324 } 1325 if (ring->data_dmat != NULL) { 1326 bus_dma_tag_destroy(ring->data_dmat); 1327 ring->data_dmat = NULL; 1328 } 1329 } 1330 1331 /* 1332 * Extract various information from EEPROM. 1333 */ 1334 static int 1335 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1336 { 1337 #define WPI_CHK(res) do { \ 1338 if ((error = res) != 0) \ 1339 goto fail; \ 1340 } while (0) 1341 int error, i; 1342 1343 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1344 1345 /* Adapter has to be powered on for EEPROM access to work. */ 1346 if ((error = wpi_apm_init(sc)) != 0) { 1347 device_printf(sc->sc_dev, 1348 "%s: could not power ON adapter, error %d\n", __func__, 1349 error); 1350 return error; 1351 } 1352 1353 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1354 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1355 error = EIO; 1356 goto fail; 1357 } 1358 /* Clear HW ownership of EEPROM. */ 1359 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1360 1361 /* Read the hardware capabilities, revision and SKU type. */ 1362 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1363 sizeof(sc->cap))); 1364 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1365 sizeof(sc->rev))); 1366 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1367 sizeof(sc->type))); 1368 1369 sc->rev = le16toh(sc->rev); 1370 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1371 sc->rev, sc->type); 1372 1373 /* Read the regulatory domain (4 ASCII characters.) 
*/ 1374 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1375 sizeof(sc->domain))); 1376 1377 /* Read MAC address. */ 1378 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1379 IEEE80211_ADDR_LEN)); 1380 1381 /* Read the list of authorized channels. */ 1382 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1383 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1384 1385 /* Read the list of TX power groups. */ 1386 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1387 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1388 1389 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1390 1391 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1392 __func__); 1393 1394 return error; 1395 #undef WPI_CHK 1396 } 1397 1398 /* 1399 * Translate EEPROM flags to net80211. 1400 */ 1401 static uint32_t 1402 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1403 { 1404 uint32_t nflags; 1405 1406 nflags = 0; 1407 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1408 nflags |= IEEE80211_CHAN_PASSIVE; 1409 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1410 nflags |= IEEE80211_CHAN_NOADHOC; 1411 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1412 nflags |= IEEE80211_CHAN_DFS; 1413 /* XXX apparently IBSS may still be marked */ 1414 nflags |= IEEE80211_CHAN_NOADHOC; 1415 } 1416 1417 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1418 if (nflags & IEEE80211_CHAN_NOADHOC) 1419 nflags |= IEEE80211_CHAN_NOHOSTAP; 1420 1421 return nflags; 1422 } 1423 1424 static void 1425 wpi_read_eeprom_band(struct wpi_softc *sc, int n) 1426 { 1427 struct ieee80211com *ic = &sc->sc_ic; 1428 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1429 const struct wpi_chan_band *band = &wpi_bands[n]; 1430 struct ieee80211_channel *c; 1431 uint8_t chan; 1432 int i, nflags; 1433 1434 for (i = 0; i < band->nchan; i++) { 1435 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1436 DPRINTF(sc, WPI_DEBUG_EEPROM, 1437 "Channel Not Valid: %d, band %d\n", 1438 band->chan[i],n); 1439 continue; 1440 } 1441 1442 chan = band->chan[i]; 1443 nflags = wpi_eeprom_channel_flags(&channels[i]); 1444 1445 c = &ic->ic_channels[ic->ic_nchans++]; 1446 c->ic_ieee = chan; 1447 c->ic_maxregpower = channels[i].maxpwr; 1448 c->ic_maxpower = 2*c->ic_maxregpower; 1449 1450 if (n == 0) { /* 2GHz band */ 1451 c->ic_freq = ieee80211_ieee2mhz(chan, 1452 IEEE80211_CHAN_G); 1453 1454 /* G =>'s B is supported */ 1455 c->ic_flags = IEEE80211_CHAN_B | nflags; 1456 c = &ic->ic_channels[ic->ic_nchans++]; 1457 c[0] = c[-1]; 1458 c->ic_flags = IEEE80211_CHAN_G | nflags; 1459 } else { /* 5GHz band */ 1460 c->ic_freq = ieee80211_ieee2mhz(chan, 1461 IEEE80211_CHAN_A); 1462 1463 c->ic_flags = IEEE80211_CHAN_A | nflags; 1464 } 1465 1466 /* Save maximum allowed TX power for this channel. */ 1467 sc->maxpwr[chan] = channels[i].maxpwr; 1468 1469 DPRINTF(sc, WPI_DEBUG_EEPROM, 1470 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1471 " offset %d\n", chan, c->ic_freq, 1472 channels[i].flags, sc->maxpwr[chan], 1473 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1474 } 1475 } 1476 1477 /** 1478 * Read the eeprom to find out what channels are valid for the given 1479 * band and update net80211 with what we find. 
1480 */ 1481 static int 1482 wpi_read_eeprom_channels(struct wpi_softc *sc, int n) 1483 { 1484 struct ieee80211com *ic = &sc->sc_ic; 1485 const struct wpi_chan_band *band = &wpi_bands[n]; 1486 int error; 1487 1488 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1489 1490 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1491 band->nchan * sizeof (struct wpi_eeprom_chan)); 1492 if (error != 0) { 1493 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1494 return error; 1495 } 1496 1497 wpi_read_eeprom_band(sc, n); 1498 1499 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1500 1501 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1502 1503 return 0; 1504 } 1505 1506 static struct wpi_eeprom_chan * 1507 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1508 { 1509 int i, j; 1510 1511 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1512 for (i = 0; i < wpi_bands[j].nchan; i++) 1513 if (wpi_bands[j].chan[i] == c->ic_ieee) 1514 return &sc->eeprom_channels[j][i]; 1515 1516 return NULL; 1517 } 1518 1519 /* 1520 * Enforce flags read from EEPROM. 1521 */ 1522 static int 1523 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1524 int nchan, struct ieee80211_channel chans[]) 1525 { 1526 struct wpi_softc *sc = ic->ic_softc; 1527 int i; 1528 1529 for (i = 0; i < nchan; i++) { 1530 struct ieee80211_channel *c = &chans[i]; 1531 struct wpi_eeprom_chan *channel; 1532 1533 channel = wpi_find_eeprom_channel(sc, c); 1534 if (channel == NULL) { 1535 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1536 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1537 return EINVAL; 1538 } 1539 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1540 } 1541 1542 return 0; 1543 } 1544 1545 static int 1546 wpi_read_eeprom_group(struct wpi_softc *sc, int n) 1547 { 1548 struct wpi_power_group *group = &sc->groups[n]; 1549 struct wpi_eeprom_group rgroup; 1550 int i, error; 1551 1552 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1553 1554 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1555 &rgroup, sizeof rgroup)) != 0) { 1556 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1557 return error; 1558 } 1559 1560 /* Save TX power group information. */ 1561 group->chan = rgroup.chan; 1562 group->maxpwr = rgroup.maxpwr; 1563 /* Retrieve temperature at which the samples were taken. 
*/ 1564 group->temp = (int16_t)le16toh(rgroup.temp); 1565 1566 DPRINTF(sc, WPI_DEBUG_EEPROM, 1567 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1568 group->maxpwr, group->temp); 1569 1570 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1571 group->samples[i].index = rgroup.samples[i].index; 1572 group->samples[i].power = rgroup.samples[i].power; 1573 1574 DPRINTF(sc, WPI_DEBUG_EEPROM, 1575 "\tsample %d: index=%d power=%d\n", i, 1576 group->samples[i].index, group->samples[i].power); 1577 } 1578 1579 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1580 1581 return 0; 1582 } 1583 1584 static int 1585 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1586 { 1587 int newid = WPI_ID_IBSS_MIN; 1588 1589 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1590 if ((sc->nodesmsk & (1 << newid)) == 0) { 1591 sc->nodesmsk |= 1 << newid; 1592 return newid; 1593 } 1594 } 1595 1596 return WPI_ID_UNDEFINED; 1597 } 1598 1599 static __inline int 1600 wpi_add_node_entry_sta(struct wpi_softc *sc) 1601 { 1602 sc->nodesmsk |= 1 << WPI_ID_BSS; 1603 1604 return WPI_ID_BSS; 1605 } 1606 1607 static __inline int 1608 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1609 { 1610 if (id == WPI_ID_UNDEFINED) 1611 return 0; 1612 1613 return (sc->nodesmsk >> id) & 1; 1614 } 1615 1616 static __inline void 1617 wpi_clear_node_table(struct wpi_softc *sc) 1618 { 1619 sc->nodesmsk = 0; 1620 } 1621 1622 static __inline void 1623 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1624 { 1625 sc->nodesmsk &= ~(1 << id); 1626 } 1627 1628 static struct ieee80211_node * 1629 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1630 { 1631 struct wpi_node *wn; 1632 1633 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1634 M_NOWAIT | M_ZERO); 1635 1636 if (wn == NULL) 1637 return NULL; 1638 1639 wn->id = WPI_ID_UNDEFINED; 1640 1641 return &wn->ni; 1642 } 1643 1644 static void 1645 wpi_node_free(struct ieee80211_node *ni) 1646 { 1647 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1648 struct wpi_node *wn = WPI_NODE(ni); 1649 1650 if (wn->id != WPI_ID_UNDEFINED) { 1651 WPI_NT_LOCK(sc); 1652 if (wpi_check_node_entry(sc, wn->id)) { 1653 wpi_del_node_entry(sc, wn->id); 1654 wpi_del_node(sc, ni); 1655 } 1656 WPI_NT_UNLOCK(sc); 1657 } 1658 1659 sc->sc_node_free(ni); 1660 } 1661 1662 static __inline int 1663 wpi_check_bss_filter(struct wpi_softc *sc) 1664 { 1665 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1666 } 1667 1668 static void 1669 wpi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1670 const struct ieee80211_rx_stats *rxs, 1671 int rssi, int nf) 1672 { 1673 struct ieee80211vap *vap = ni->ni_vap; 1674 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1675 struct wpi_vap *wvp = WPI_VAP(vap); 1676 uint64_t ni_tstamp, rx_tstamp; 1677 1678 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1679 1680 if (vap->iv_opmode == IEEE80211_M_IBSS && 1681 vap->iv_state == IEEE80211_S_RUN && 1682 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1683 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1684 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1685 rx_tstamp = le64toh(sc->rx_tstamp); 1686 1687 if (ni_tstamp >= rx_tstamp) { 1688 DPRINTF(sc, WPI_DEBUG_STATE, 1689 "ibss merge, tsf %ju tstamp %ju\n", 1690 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1691 (void) ieee80211_ibss_merge(ni); 1692 } 1693 } 1694 } 1695 1696 static void 1697 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1698 { 1699 struct wpi_softc *sc = arg; 1700 struct wpi_node *wn = WPI_NODE(ni); 1701 int error; 1702 1703 
WPI_NT_LOCK(sc); 1704 if (wn->id != WPI_ID_UNDEFINED) { 1705 wn->id = WPI_ID_UNDEFINED; 1706 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1707 device_printf(sc->sc_dev, 1708 "%s: could not add IBSS node, error %d\n", 1709 __func__, error); 1710 } 1711 } 1712 WPI_NT_UNLOCK(sc); 1713 } 1714 1715 static void 1716 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1717 { 1718 struct ieee80211com *ic = &sc->sc_ic; 1719 1720 /* Set group keys once. */ 1721 WPI_NT_LOCK(sc); 1722 wvp->wv_gtk = 0; 1723 WPI_NT_UNLOCK(sc); 1724 1725 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1726 ieee80211_crypto_reload_keys(ic); 1727 } 1728 1729 /** 1730 * Called by net80211 whenever there is a change to the 802.11 state machine. 1731 */ 1732 static int 1733 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1734 { 1735 struct wpi_vap *wvp = WPI_VAP(vap); 1736 struct ieee80211com *ic = vap->iv_ic; 1737 struct wpi_softc *sc = ic->ic_softc; 1738 int error = 0; 1739 1740 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1741 1742 WPI_TXQ_LOCK(sc); 1743 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1744 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1745 WPI_TXQ_UNLOCK(sc); 1746 1747 return ENXIO; 1748 } 1749 WPI_TXQ_UNLOCK(sc); 1750 1751 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1752 ieee80211_state_name[vap->iv_state], 1753 ieee80211_state_name[nstate]); 1754 1755 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1756 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1757 device_printf(sc->sc_dev, 1758 "%s: could not set power saving level\n", 1759 __func__); 1760 return error; 1761 } 1762 1763 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1764 } 1765 1766 switch (nstate) { 1767 case IEEE80211_S_SCAN: 1768 WPI_RXON_LOCK(sc); 1769 if (wpi_check_bss_filter(sc) != 0) { 1770 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1771 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1772 device_printf(sc->sc_dev, 1773 "%s: could not send RXON\n", __func__); 1774 } 1775 } 1776 WPI_RXON_UNLOCK(sc); 1777 break; 1778 1779 case IEEE80211_S_ASSOC: 1780 if (vap->iv_state != IEEE80211_S_RUN) 1781 break; 1782 /* FALLTHROUGH */ 1783 case IEEE80211_S_AUTH: 1784 /* 1785 * NB: do not optimize the AUTH -> AUTH state transition - 1786 * this will break powersave with non-QoS AP! 1787 */ 1788 1789 /* 1790 * The node must be registered in the firmware before auth. 1791 * Also the associd must be cleared on RUN -> ASSOC 1792 * transitions. 1793 */ 1794 if ((error = wpi_auth(sc, vap)) != 0) { 1795 device_printf(sc->sc_dev, 1796 "%s: could not move to AUTH state, error %d\n", 1797 __func__, error); 1798 } 1799 break; 1800 1801 case IEEE80211_S_RUN: 1802 /* 1803 * RUN -> RUN transition: 1804 * STA mode: Just restart the timers. 1805 * IBSS mode: Process IBSS merge. 1806 */ 1807 if (vap->iv_state == IEEE80211_S_RUN) { 1808 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1809 WPI_RXON_LOCK(sc); 1810 wpi_calib_timeout(sc); 1811 WPI_RXON_UNLOCK(sc); 1812 break; 1813 } else { 1814 /* 1815 * Drop the BSS_FILTER bit 1816 * (there is no other way to change the bssid). 1817 */ 1818 WPI_RXON_LOCK(sc); 1819 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1820 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1821 device_printf(sc->sc_dev, 1822 "%s: could not send RXON\n", 1823 __func__); 1824 } 1825 WPI_RXON_UNLOCK(sc); 1826 1827 /* Restore everything that was lost. */ 1828 wpi_restore_node_table(sc, wvp); 1829 1830 /* XXX set conditionally?
*/ 1831 wpi_updateedca(ic); 1832 } 1833 } 1834 1835 /* 1836 * !RUN -> RUN requires setting the association id 1837 * which is done with a firmware cmd. We also defer 1838 * starting the timers until that work is done. 1839 */ 1840 if ((error = wpi_run(sc, vap)) != 0) { 1841 device_printf(sc->sc_dev, 1842 "%s: could not move to RUN state\n", __func__); 1843 } 1844 break; 1845 1846 default: 1847 break; 1848 } 1849 if (error != 0) { 1850 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1851 return error; 1852 } 1853 1854 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1855 1856 return wvp->wv_newstate(vap, nstate, arg); 1857 } 1858 1859 static void 1860 wpi_calib_timeout(void *arg) 1861 { 1862 struct wpi_softc *sc = arg; 1863 1864 if (wpi_check_bss_filter(sc) == 0) 1865 return; 1866 1867 wpi_power_calibration(sc); 1868 1869 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1870 } 1871 1872 static __inline uint8_t 1873 rate2plcp(const uint8_t rate) 1874 { 1875 switch (rate) { 1876 case 12: return 0xd; 1877 case 18: return 0xf; 1878 case 24: return 0x5; 1879 case 36: return 0x7; 1880 case 48: return 0x9; 1881 case 72: return 0xb; 1882 case 96: return 0x1; 1883 case 108: return 0x3; 1884 case 2: return 10; 1885 case 4: return 20; 1886 case 11: return 55; 1887 case 22: return 110; 1888 default: return 0; 1889 } 1890 } 1891 1892 static __inline uint8_t 1893 plcp2rate(const uint8_t plcp) 1894 { 1895 switch (plcp) { 1896 case 0xd: return 12; 1897 case 0xf: return 18; 1898 case 0x5: return 24; 1899 case 0x7: return 36; 1900 case 0x9: return 48; 1901 case 0xb: return 72; 1902 case 0x1: return 96; 1903 case 0x3: return 108; 1904 case 10: return 2; 1905 case 20: return 4; 1906 case 55: return 11; 1907 case 110: return 22; 1908 default: return 0; 1909 } 1910 } 1911 1912 /* Quickly determine if a given rate is CCK or OFDM. */ 1913 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1914 1915 static void 1916 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1917 struct wpi_rx_data *data) 1918 { 1919 struct ieee80211com *ic = &sc->sc_ic; 1920 struct wpi_rx_ring *ring = &sc->rxq; 1921 struct wpi_rx_stat *stat; 1922 struct wpi_rx_head *head; 1923 struct wpi_rx_tail *tail; 1924 struct ieee80211_frame *wh; 1925 struct ieee80211_node *ni; 1926 struct mbuf *m, *m1; 1927 bus_addr_t paddr; 1928 uint32_t flags; 1929 uint16_t len; 1930 int error; 1931 1932 stat = (struct wpi_rx_stat *)(desc + 1); 1933 1934 if (stat->len > WPI_STAT_MAXLEN) { 1935 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1936 goto fail1; 1937 } 1938 1939 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1940 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1941 len = le16toh(head->len); 1942 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1943 flags = le32toh(tail->flags); 1944 1945 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1946 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1947 le32toh(desc->len), len, (int8_t)stat->rssi, 1948 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1949 1950 /* Discard frames with a bad FCS early. */ 1951 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1952 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1953 __func__, flags); 1954 goto fail1; 1955 } 1956 /* Discard frames that are too short. 
*/ 1957 if (len < sizeof (struct ieee80211_frame_ack)) { 1958 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1959 __func__, len); 1960 goto fail1; 1961 } 1962 1963 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1964 if (m1 == NULL) { 1965 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1966 __func__); 1967 goto fail1; 1968 } 1969 bus_dmamap_unload(ring->data_dmat, data->map); 1970 1971 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1972 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1973 if (error != 0 && error != EFBIG) { 1974 device_printf(sc->sc_dev, 1975 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1976 m_freem(m1); 1977 1978 /* Try to reload the old mbuf. */ 1979 error = bus_dmamap_load(ring->data_dmat, data->map, 1980 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1981 &paddr, BUS_DMA_NOWAIT); 1982 if (error != 0 && error != EFBIG) { 1983 panic("%s: could not load old RX mbuf", __func__); 1984 } 1985 /* Physical address may have changed. */ 1986 ring->desc[ring->cur] = htole32(paddr); 1987 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1988 BUS_DMASYNC_PREWRITE); 1989 goto fail1; 1990 } 1991 1992 m = data->m; 1993 data->m = m1; 1994 /* Update RX descriptor. */ 1995 ring->desc[ring->cur] = htole32(paddr); 1996 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1997 BUS_DMASYNC_PREWRITE); 1998 1999 /* Finalize mbuf. */ 2000 m->m_data = (caddr_t)(head + 1); 2001 m->m_pkthdr.len = m->m_len = len; 2002 2003 /* Grab a reference to the source node. */ 2004 wh = mtod(m, struct ieee80211_frame *); 2005 2006 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2007 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2008 /* Check whether decryption was successful or not. */ 2009 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2010 DPRINTF(sc, WPI_DEBUG_RECV, 2011 "CCMP decryption failed 0x%x\n", flags); 2012 goto fail2; 2013 } 2014 m->m_flags |= M_WEP; 2015 } 2016 2017 if (len >= sizeof(struct ieee80211_frame_min)) 2018 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2019 else 2020 ni = NULL; 2021 2022 sc->rx_tstamp = tail->tstamp; 2023 2024 if (ieee80211_radiotap_active(ic)) { 2025 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2026 2027 tap->wr_flags = 0; 2028 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2029 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2030 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2031 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2032 tap->wr_tsft = tail->tstamp; 2033 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2034 tap->wr_rate = plcp2rate(head->plcp); 2035 } 2036 2037 WPI_UNLOCK(sc); 2038 2039 /* Send the frame to the 802.11 layer. */ 2040 if (ni != NULL) { 2041 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2042 /* Node is no longer needed. 
*/ 2043 ieee80211_free_node(ni); 2044 } else 2045 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2046 2047 WPI_LOCK(sc); 2048 2049 return; 2050 2051 fail2: m_freem(m); 2052 2053 fail1: counter_u64_add(ic->ic_ierrors, 1); 2054 } 2055 2056 static void 2057 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2058 struct wpi_rx_data *data) 2059 { 2060 /* Ignore */ 2061 } 2062 2063 static void 2064 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2065 { 2066 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2067 struct wpi_tx_data *data = &ring->data[desc->idx]; 2068 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2069 struct mbuf *m; 2070 struct ieee80211_node *ni; 2071 struct ieee80211vap *vap; 2072 struct ieee80211com *ic; 2073 uint32_t status = le32toh(stat->status); 2074 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2075 2076 KASSERT(data->ni != NULL, ("no node")); 2077 KASSERT(data->m != NULL, ("no mbuf")); 2078 2079 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2080 2081 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2082 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2083 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2084 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2085 2086 /* Unmap and free mbuf. */ 2087 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2088 bus_dmamap_unload(ring->data_dmat, data->map); 2089 m = data->m, data->m = NULL; 2090 ni = data->ni, data->ni = NULL; 2091 vap = ni->ni_vap; 2092 ic = vap->iv_ic; 2093 2094 /* 2095 * Update rate control statistics for the node. 2096 */ 2097 if (status & WPI_TX_STATUS_FAIL) { 2098 ieee80211_ratectl_tx_complete(vap, ni, 2099 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2100 } else 2101 ieee80211_ratectl_tx_complete(vap, ni, 2102 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2103 2104 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2105 2106 WPI_TXQ_STATE_LOCK(sc); 2107 ring->queued -= 1; 2108 if (ring->queued > 0) { 2109 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2110 2111 if ((sc->qfullmsk & (1 << ring->qid)) != 0 && 2112 ring->queued < WPI_TX_RING_LOMARK) { 2113 sc->qfullmsk &= ~(1 << ring->qid); 2114 ieee80211_runtask(ic, &sc->sc_start_task); 2115 } 2116 } else 2117 callout_stop(&sc->tx_timeout); 2118 WPI_TXQ_STATE_UNLOCK(sc); 2119 2120 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2121 } 2122 2123 /* 2124 * Process a "command done" firmware notification. This is where we wakeup 2125 * processes waiting for a synchronous command completion. 2126 */ 2127 static void 2128 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2129 { 2130 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2131 struct wpi_tx_data *data; 2132 2133 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2134 "type %s len %d\n", desc->qid, desc->idx, 2135 desc->flags, wpi_cmd_str(desc->type), 2136 le32toh(desc->len)); 2137 2138 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2139 return; /* Not a command ack. */ 2140 2141 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2142 2143 data = &ring->data[desc->idx]; 2144 2145 /* If the command was mapped in an mbuf, free it. 
*/ 2146 if (data->m != NULL) { 2147 bus_dmamap_sync(ring->data_dmat, data->map, 2148 BUS_DMASYNC_POSTWRITE); 2149 bus_dmamap_unload(ring->data_dmat, data->map); 2150 m_freem(data->m); 2151 data->m = NULL; 2152 } 2153 2154 wakeup(&ring->cmd[desc->idx]); 2155 2156 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2157 WPI_TXQ_LOCK(sc); 2158 if (sc->sc_flags & WPI_PS_PATH) { 2159 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2160 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2161 } else { 2162 sc->sc_update_rx_ring = wpi_update_rx_ring; 2163 sc->sc_update_tx_ring = wpi_update_tx_ring; 2164 } 2165 WPI_TXQ_UNLOCK(sc); 2166 } 2167 } 2168 2169 static void 2170 wpi_notif_intr(struct wpi_softc *sc) 2171 { 2172 struct ieee80211com *ic = &sc->sc_ic; 2173 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2174 uint32_t hw; 2175 2176 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2177 BUS_DMASYNC_POSTREAD); 2178 2179 hw = le32toh(sc->shared->next) & 0xfff; 2180 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2181 2182 while (sc->rxq.cur != hw) { 2183 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2184 2185 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2186 struct wpi_rx_desc *desc; 2187 2188 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2189 BUS_DMASYNC_POSTREAD); 2190 desc = mtod(data->m, struct wpi_rx_desc *); 2191 2192 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2193 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2194 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2195 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2196 2197 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2198 /* Reply to a command. */ 2199 wpi_cmd_done(sc, desc); 2200 } 2201 2202 switch (desc->type) { 2203 case WPI_RX_DONE: 2204 /* An 802.11 frame has been received. */ 2205 wpi_rx_done(sc, desc, data); 2206 2207 if (sc->sc_running == 0) { 2208 /* wpi_stop() was called. */ 2209 return; 2210 } 2211 2212 break; 2213 2214 case WPI_TX_DONE: 2215 /* An 802.11 frame has been transmitted. 
*/ 2216 wpi_tx_done(sc, desc); 2217 break; 2218 2219 case WPI_RX_STATISTICS: 2220 case WPI_BEACON_STATISTICS: 2221 wpi_rx_statistics(sc, desc, data); 2222 break; 2223 2224 case WPI_BEACON_MISSED: 2225 { 2226 struct wpi_beacon_missed *miss = 2227 (struct wpi_beacon_missed *)(desc + 1); 2228 uint32_t expected, misses, received, threshold; 2229 2230 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2231 BUS_DMASYNC_POSTREAD); 2232 2233 misses = le32toh(miss->consecutive); 2234 expected = le32toh(miss->expected); 2235 received = le32toh(miss->received); 2236 threshold = MAX(2, vap->iv_bmissthreshold); 2237 2238 DPRINTF(sc, WPI_DEBUG_BMISS, 2239 "%s: beacons missed %u(%u) (received %u/%u)\n", 2240 __func__, misses, le32toh(miss->total), received, 2241 expected); 2242 2243 if (misses >= threshold || 2244 (received == 0 && expected >= threshold)) { 2245 WPI_RXON_LOCK(sc); 2246 if (callout_pending(&sc->scan_timeout)) { 2247 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2248 0, 1); 2249 } 2250 WPI_RXON_UNLOCK(sc); 2251 if (vap->iv_state == IEEE80211_S_RUN && 2252 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2253 ieee80211_beacon_miss(ic); 2254 } 2255 2256 break; 2257 } 2258 #ifdef WPI_DEBUG 2259 case WPI_BEACON_SENT: 2260 { 2261 struct wpi_tx_stat *stat = 2262 (struct wpi_tx_stat *)(desc + 1); 2263 uint64_t *tsf = (uint64_t *)(stat + 1); 2264 uint32_t *mode = (uint32_t *)(tsf + 1); 2265 2266 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2267 BUS_DMASYNC_POSTREAD); 2268 2269 DPRINTF(sc, WPI_DEBUG_BEACON, 2270 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2271 "duration %u, status %x, tsf %ju, mode %x\n", 2272 stat->rtsfailcnt, stat->ackfailcnt, 2273 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2274 le32toh(stat->status), *tsf, *mode); 2275 2276 break; 2277 } 2278 #endif 2279 case WPI_UC_READY: 2280 { 2281 struct wpi_ucode_info *uc = 2282 (struct wpi_ucode_info *)(desc + 1); 2283 2284 /* The microcontroller is ready. */ 2285 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2286 BUS_DMASYNC_POSTREAD); 2287 DPRINTF(sc, WPI_DEBUG_RESET, 2288 "microcode alive notification version=%d.%d " 2289 "subtype=%x alive=%x\n", uc->major, uc->minor, 2290 uc->subtype, le32toh(uc->valid)); 2291 2292 if (le32toh(uc->valid) != 1) { 2293 device_printf(sc->sc_dev, 2294 "microcontroller initialization failed\n"); 2295 wpi_stop_locked(sc); 2296 return; 2297 } 2298 /* Save the address of the error log in SRAM. 
*/ 2299 sc->errptr = le32toh(uc->errptr); 2300 break; 2301 } 2302 case WPI_STATE_CHANGED: 2303 { 2304 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2305 BUS_DMASYNC_POSTREAD); 2306 2307 uint32_t *status = (uint32_t *)(desc + 1); 2308 2309 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2310 le32toh(*status)); 2311 2312 if (le32toh(*status) & 1) { 2313 WPI_NT_LOCK(sc); 2314 wpi_clear_node_table(sc); 2315 WPI_NT_UNLOCK(sc); 2316 taskqueue_enqueue(sc->sc_tq, 2317 &sc->sc_radiooff_task); 2318 return; 2319 } 2320 break; 2321 } 2322 #ifdef WPI_DEBUG 2323 case WPI_START_SCAN: 2324 { 2325 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2326 BUS_DMASYNC_POSTREAD); 2327 2328 struct wpi_start_scan *scan = 2329 (struct wpi_start_scan *)(desc + 1); 2330 DPRINTF(sc, WPI_DEBUG_SCAN, 2331 "%s: scanning channel %d status %x\n", 2332 __func__, scan->chan, le32toh(scan->status)); 2333 2334 break; 2335 } 2336 #endif 2337 case WPI_STOP_SCAN: 2338 { 2339 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2340 BUS_DMASYNC_POSTREAD); 2341 2342 struct wpi_stop_scan *scan = 2343 (struct wpi_stop_scan *)(desc + 1); 2344 2345 DPRINTF(sc, WPI_DEBUG_SCAN, 2346 "scan finished nchan=%d status=%d chan=%d\n", 2347 scan->nchan, scan->status, scan->chan); 2348 2349 WPI_RXON_LOCK(sc); 2350 callout_stop(&sc->scan_timeout); 2351 WPI_RXON_UNLOCK(sc); 2352 if (scan->status == WPI_SCAN_ABORTED) 2353 ieee80211_cancel_scan(vap); 2354 else 2355 ieee80211_scan_next(vap); 2356 break; 2357 } 2358 } 2359 2360 if (sc->rxq.cur % 8 == 0) { 2361 /* Tell the firmware what we have processed. */ 2362 sc->sc_update_rx_ring(sc); 2363 } 2364 } 2365 } 2366 2367 /* 2368 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2369 * from power-down sleep mode. 2370 */ 2371 static void 2372 wpi_wakeup_intr(struct wpi_softc *sc) 2373 { 2374 int qid; 2375 2376 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2377 "%s: ucode wakeup from power-down sleep\n", __func__); 2378 2379 /* Wakeup RX and TX rings. 
*/ 2380 if (sc->rxq.update) { 2381 sc->rxq.update = 0; 2382 wpi_update_rx_ring(sc); 2383 } 2384 WPI_TXQ_LOCK(sc); 2385 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2386 struct wpi_tx_ring *ring = &sc->txq[qid]; 2387 2388 if (ring->update) { 2389 ring->update = 0; 2390 wpi_update_tx_ring(sc, ring); 2391 } 2392 } 2393 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2394 WPI_TXQ_UNLOCK(sc); 2395 } 2396 2397 /* 2398 * This function prints firmware registers 2399 */ 2400 #ifdef WPI_DEBUG 2401 static void 2402 wpi_debug_registers(struct wpi_softc *sc) 2403 { 2404 size_t i; 2405 static const uint32_t csr_tbl[] = { 2406 WPI_HW_IF_CONFIG, 2407 WPI_INT, 2408 WPI_INT_MASK, 2409 WPI_FH_INT, 2410 WPI_GPIO_IN, 2411 WPI_RESET, 2412 WPI_GP_CNTRL, 2413 WPI_EEPROM, 2414 WPI_EEPROM_GP, 2415 WPI_GIO, 2416 WPI_UCODE_GP1, 2417 WPI_UCODE_GP2, 2418 WPI_GIO_CHICKEN, 2419 WPI_ANA_PLL, 2420 WPI_DBG_HPET_MEM, 2421 }; 2422 static const uint32_t prph_tbl[] = { 2423 WPI_APMG_CLK_CTRL, 2424 WPI_APMG_PS, 2425 WPI_APMG_PCI_STT, 2426 WPI_APMG_RFKILL, 2427 }; 2428 2429 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2430 2431 for (i = 0; i < nitems(csr_tbl); i++) { 2432 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2433 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2434 2435 if ((i + 1) % 2 == 0) 2436 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2437 } 2438 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2439 2440 if (wpi_nic_lock(sc) == 0) { 2441 for (i = 0; i < nitems(prph_tbl); i++) { 2442 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2443 wpi_get_prph_string(prph_tbl[i]), 2444 wpi_prph_read(sc, prph_tbl[i])); 2445 2446 if ((i + 1) % 2 == 0) 2447 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2448 } 2449 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2450 wpi_nic_unlock(sc); 2451 } else { 2452 DPRINTF(sc, WPI_DEBUG_REGISTER, 2453 "Cannot access internal registers.\n"); 2454 } 2455 } 2456 #endif 2457 2458 /* 2459 * Dump the error log of the firmware when a firmware panic occurs. Although 2460 * we can't debug the firmware because it is neither open source nor free, it 2461 * can help us to identify certain classes of problems. 2462 */ 2463 static void 2464 wpi_fatal_intr(struct wpi_softc *sc) 2465 { 2466 struct wpi_fw_dump dump; 2467 uint32_t i, offset, count; 2468 2469 /* Check that the error log address is valid. */ 2470 if (sc->errptr < WPI_FW_DATA_BASE || 2471 sc->errptr + sizeof (dump) > 2472 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2473 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2474 sc->errptr); 2475 return; 2476 } 2477 if (wpi_nic_lock(sc) != 0) { 2478 printf("%s: could not read firmware error log\n", __func__); 2479 return; 2480 } 2481 /* Read number of entries in the log. */ 2482 count = wpi_mem_read(sc, sc->errptr); 2483 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2484 printf("%s: invalid count field (count = %u)\n", __func__, 2485 count); 2486 wpi_nic_unlock(sc); 2487 return; 2488 } 2489 /* Skip "count" field. */ 2490 offset = sc->errptr + sizeof (uint32_t); 2491 printf("firmware error log (count = %u):\n", count); 2492 for (i = 0; i < count; i++) { 2493 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2494 sizeof (dump) / sizeof (uint32_t)); 2495 2496 printf(" error type = \"%s\" (0x%08X)\n", 2497 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2498 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2499 dump.desc); 2500 printf(" error data = 0x%08X\n", 2501 dump.data); 2502 printf(" branch link = 0x%08X%08X\n", 2503 dump.blink[0], dump.blink[1]); 2504 printf(" interrupt link = 0x%08X%08X\n", 2505 dump.ilink[0], dump.ilink[1]); 2506 printf(" time = %u\n", dump.time); 2507 2508 offset += sizeof (dump); 2509 } 2510 wpi_nic_unlock(sc); 2511 /* Dump driver status (TX and RX rings) while we're here. */ 2512 printf("driver status:\n"); 2513 WPI_TXQ_LOCK(sc); 2514 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2515 struct wpi_tx_ring *ring = &sc->txq[i]; 2516 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2517 i, ring->qid, ring->cur, ring->queued); 2518 } 2519 WPI_TXQ_UNLOCK(sc); 2520 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2521 } 2522 2523 static void 2524 wpi_intr(void *arg) 2525 { 2526 struct wpi_softc *sc = arg; 2527 uint32_t r1, r2; 2528 2529 WPI_LOCK(sc); 2530 2531 /* Disable interrupts. */ 2532 WPI_WRITE(sc, WPI_INT_MASK, 0); 2533 2534 r1 = WPI_READ(sc, WPI_INT); 2535 2536 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2537 goto end; /* Hardware gone! */ 2538 2539 r2 = WPI_READ(sc, WPI_FH_INT); 2540 2541 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2542 r1, r2); 2543 2544 if (r1 == 0 && r2 == 0) 2545 goto done; /* Interrupt not for us. */ 2546 2547 /* Acknowledge interrupts. */ 2548 WPI_WRITE(sc, WPI_INT, r1); 2549 WPI_WRITE(sc, WPI_FH_INT, r2); 2550 2551 if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) { 2552 device_printf(sc->sc_dev, "fatal firmware error\n"); 2553 #ifdef WPI_DEBUG 2554 wpi_debug_registers(sc); 2555 #endif 2556 wpi_fatal_intr(sc); 2557 DPRINTF(sc, WPI_DEBUG_HW, 2558 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2559 "(Hardware Error)"); 2560 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2561 goto end; 2562 } 2563 2564 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2565 (r2 & WPI_FH_INT_RX)) 2566 wpi_notif_intr(sc); 2567 2568 if (r1 & WPI_INT_ALIVE) 2569 wakeup(sc); /* Firmware is alive. */ 2570 2571 if (r1 & WPI_INT_WAKEUP) 2572 wpi_wakeup_intr(sc); 2573 2574 done: 2575 /* Re-enable interrupts. */ 2576 if (sc->sc_running) 2577 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2578 2579 end: WPI_UNLOCK(sc); 2580 } 2581 2582 static int 2583 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2584 { 2585 struct ieee80211_frame *wh; 2586 struct wpi_tx_cmd *cmd; 2587 struct wpi_tx_data *data; 2588 struct wpi_tx_desc *desc; 2589 struct wpi_tx_ring *ring; 2590 struct mbuf *m1; 2591 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2592 int error, i, hdrlen, nsegs, totlen, pad; 2593 2594 WPI_TXQ_LOCK(sc); 2595 2596 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2597 2598 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2599 2600 if (sc->sc_running == 0) { 2601 /* wpi_stop() was called */ 2602 error = ENETDOWN; 2603 goto fail; 2604 } 2605 2606 wh = mtod(buf->m, struct ieee80211_frame *); 2607 hdrlen = ieee80211_anyhdrsize(wh); 2608 totlen = buf->m->m_pkthdr.len; 2609 2610 if (hdrlen & 3) { 2611 /* First segment length must be a multiple of 4. */ 2612 pad = 4 - (hdrlen & 3); 2613 } else 2614 pad = 0; 2615 2616 ring = &sc->txq[buf->ac]; 2617 desc = &ring->desc[ring->cur]; 2618 data = &ring->data[ring->cur]; 2619 2620 /* Prepare TX firmware command. 
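The command shares the first DMA segment with the saved 802.11 header, so the header is padded to a 32-bit boundary; for example, a 26-byte QoS data header gives pad = 4 - (26 & 3) = 2, while the common 24-byte header needs none.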
*/ 2621 cmd = &ring->cmd[ring->cur]; 2622 cmd->code = buf->code; 2623 cmd->flags = 0; 2624 cmd->qid = ring->qid; 2625 cmd->idx = ring->cur; 2626 2627 memcpy(cmd->data, buf->data, buf->size); 2628 2629 /* Save and trim IEEE802.11 header. */ 2630 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2631 m_adj(buf->m, hdrlen); 2632 2633 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2634 segs, &nsegs, BUS_DMA_NOWAIT); 2635 if (error != 0 && error != EFBIG) { 2636 device_printf(sc->sc_dev, 2637 "%s: can't map mbuf (error %d)\n", __func__, error); 2638 goto fail; 2639 } 2640 if (error != 0) { 2641 /* Too many DMA segments, linearize mbuf. */ 2642 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2643 if (m1 == NULL) { 2644 device_printf(sc->sc_dev, 2645 "%s: could not defrag mbuf\n", __func__); 2646 error = ENOBUFS; 2647 goto fail; 2648 } 2649 buf->m = m1; 2650 2651 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2652 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2653 if (error != 0) { 2654 device_printf(sc->sc_dev, 2655 "%s: can't map mbuf (error %d)\n", __func__, 2656 error); 2657 goto fail; 2658 } 2659 } 2660 2661 KASSERT(nsegs < WPI_MAX_SCATTER, 2662 ("too many DMA segments, nsegs (%d) should be less than %d", 2663 nsegs, WPI_MAX_SCATTER)); 2664 2665 data->m = buf->m; 2666 data->ni = buf->ni; 2667 2668 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2669 __func__, ring->qid, ring->cur, totlen, nsegs); 2670 2671 /* Fill TX descriptor. */ 2672 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2673 /* First DMA segment is used by the TX command. */ 2674 desc->segs[0].addr = htole32(data->cmd_paddr); 2675 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2676 /* Other DMA segments are for data payload. */ 2677 seg = &segs[0]; 2678 for (i = 1; i <= nsegs; i++) { 2679 desc->segs[i].addr = htole32(seg->ds_addr); 2680 desc->segs[i].len = htole32(seg->ds_len); 2681 seg++; 2682 } 2683 2684 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2685 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2686 BUS_DMASYNC_PREWRITE); 2687 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2688 BUS_DMASYNC_PREWRITE); 2689 2690 /* Kick TX ring. */ 2691 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2692 sc->sc_update_tx_ring(sc, ring); 2693 2694 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2695 /* Mark TX ring as full if we reach a certain threshold. */ 2696 WPI_TXQ_STATE_LOCK(sc); 2697 if (++ring->queued > WPI_TX_RING_HIMARK) 2698 sc->qfullmsk |= 1 << ring->qid; 2699 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2700 WPI_TXQ_STATE_UNLOCK(sc); 2701 } 2702 2703 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2704 2705 WPI_TXQ_UNLOCK(sc); 2706 2707 return 0; 2708 2709 fail: m_freem(buf->m); 2710 2711 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2712 2713 WPI_TXQ_UNLOCK(sc); 2714 2715 return error; 2716 } 2717 2718 /* 2719 * Construct the data packet for a transmit buffer. 
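* Select the EDCA access category and TX rate, apply encryption if the frame is protected, fill in a wpi_cmd_data structure (flags, node id, PLCP rate, retry limits) and hand it to wpi_cmd2() for DMA mapping and queueing.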
2720 */ 2721 static int 2722 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2723 { 2724 const struct ieee80211_txparam *tp; 2725 struct ieee80211vap *vap = ni->ni_vap; 2726 struct ieee80211com *ic = ni->ni_ic; 2727 struct wpi_node *wn = WPI_NODE(ni); 2728 struct ieee80211_channel *chan; 2729 struct ieee80211_frame *wh; 2730 struct ieee80211_key *k = NULL; 2731 struct wpi_buf tx_data; 2732 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2733 uint32_t flags; 2734 uint16_t qos; 2735 uint8_t tid, type; 2736 int ac, error, swcrypt, rate, ismcast, totlen; 2737 2738 wh = mtod(m, struct ieee80211_frame *); 2739 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2740 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2741 2742 /* Select EDCA Access Category and TX ring for this frame. */ 2743 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2744 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2745 tid = qos & IEEE80211_QOS_TID; 2746 } else { 2747 qos = 0; 2748 tid = 0; 2749 } 2750 ac = M_WME_GETAC(m); 2751 2752 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2753 ni->ni_chan : ic->ic_curchan; 2754 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2755 2756 /* Choose a TX rate index. */ 2757 if (type == IEEE80211_FC0_TYPE_MGT) 2758 rate = tp->mgmtrate; 2759 else if (ismcast) 2760 rate = tp->mcastrate; 2761 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2762 rate = tp->ucastrate; 2763 else if (m->m_flags & M_EAPOL) 2764 rate = tp->mgmtrate; 2765 else { 2766 /* XXX pass pktlen */ 2767 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2768 rate = ni->ni_txrate; 2769 } 2770 2771 /* Encrypt the frame if need be. */ 2772 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2773 /* Retrieve key for TX. */ 2774 k = ieee80211_crypto_encap(ni, m); 2775 if (k == NULL) { 2776 error = ENOBUFS; 2777 goto fail; 2778 } 2779 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2780 2781 /* 802.11 header may have moved. */ 2782 wh = mtod(m, struct ieee80211_frame *); 2783 } 2784 totlen = m->m_pkthdr.len; 2785 2786 if (ieee80211_radiotap_active_vap(vap)) { 2787 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2788 2789 tap->wt_flags = 0; 2790 tap->wt_rate = rate; 2791 if (k != NULL) 2792 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2793 2794 ieee80211_radiotap_tx(vap, m); 2795 } 2796 2797 flags = 0; 2798 if (!ismcast) { 2799 /* Unicast frame, check if an ACK is expected. */ 2800 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2801 IEEE80211_QOS_ACKPOLICY_NOACK) 2802 flags |= WPI_TX_NEED_ACK; 2803 } 2804 2805 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2806 flags |= WPI_TX_AUTO_SEQ; 2807 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2808 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2809 2810 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2811 if (!ismcast) { 2812 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2813 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2814 flags |= WPI_TX_NEED_RTS; 2815 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2816 WPI_RATE_IS_OFDM(rate)) { 2817 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2818 flags |= WPI_TX_NEED_CTS; 2819 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2820 flags |= WPI_TX_NEED_RTS; 2821 } 2822 2823 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2824 flags |= WPI_TX_FULL_TXOP; 2825 } 2826 2827 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2828 if (type == IEEE80211_FC0_TYPE_MGT) { 2829 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2830 2831 /* Tell HW to set timestamp in probe responses. 
*/ 2832 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2833 flags |= WPI_TX_INSERT_TSTAMP; 2834 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2835 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2836 tx->timeout = htole16(3); 2837 else 2838 tx->timeout = htole16(2); 2839 } 2840 2841 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2842 tx->id = WPI_ID_BROADCAST; 2843 else { 2844 if (wn->id == WPI_ID_UNDEFINED) { 2845 device_printf(sc->sc_dev, 2846 "%s: undefined node id\n", __func__); 2847 error = EINVAL; 2848 goto fail; 2849 } 2850 2851 tx->id = wn->id; 2852 } 2853 2854 if (k != NULL && !swcrypt) { 2855 switch (k->wk_cipher->ic_cipher) { 2856 case IEEE80211_CIPHER_AES_CCM: 2857 tx->security = WPI_CIPHER_CCMP; 2858 break; 2859 2860 default: 2861 break; 2862 } 2863 2864 memcpy(tx->key, k->wk_key, k->wk_keylen); 2865 } 2866 2867 tx->len = htole16(totlen); 2868 tx->flags = htole32(flags); 2869 tx->plcp = rate2plcp(rate); 2870 tx->tid = tid; 2871 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2872 tx->ofdm_mask = 0xff; 2873 tx->cck_mask = 0x0f; 2874 tx->rts_ntries = 7; 2875 tx->data_ntries = tp->maxretry; 2876 2877 tx_data.ni = ni; 2878 tx_data.m = m; 2879 tx_data.size = sizeof(struct wpi_cmd_data); 2880 tx_data.code = WPI_CMD_TX_DATA; 2881 tx_data.ac = ac; 2882 2883 return wpi_cmd2(sc, &tx_data); 2884 2885 fail: m_freem(m); 2886 return error; 2887 } 2888 2889 static int 2890 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2891 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2892 { 2893 struct ieee80211vap *vap = ni->ni_vap; 2894 struct ieee80211_key *k = NULL; 2895 struct ieee80211_frame *wh; 2896 struct wpi_buf tx_data; 2897 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2898 uint32_t flags; 2899 uint8_t type; 2900 int ac, rate, swcrypt, totlen; 2901 2902 wh = mtod(m, struct ieee80211_frame *); 2903 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2904 2905 ac = params->ibp_pri & 3; 2906 2907 /* Choose a TX rate index. */ 2908 rate = params->ibp_rate0; 2909 2910 flags = 0; 2911 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2912 flags |= WPI_TX_AUTO_SEQ; 2913 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2914 flags |= WPI_TX_NEED_ACK; 2915 if (params->ibp_flags & IEEE80211_BPF_RTS) 2916 flags |= WPI_TX_NEED_RTS; 2917 if (params->ibp_flags & IEEE80211_BPF_CTS) 2918 flags |= WPI_TX_NEED_CTS; 2919 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2920 flags |= WPI_TX_FULL_TXOP; 2921 2922 /* Encrypt the frame if need be. */ 2923 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2924 /* Retrieve key for TX. */ 2925 k = ieee80211_crypto_encap(ni, m); 2926 if (k == NULL) { 2927 m_freem(m); 2928 return ENOBUFS; 2929 } 2930 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2931 2932 /* 802.11 header may have moved. */ 2933 wh = mtod(m, struct ieee80211_frame *); 2934 } 2935 totlen = m->m_pkthdr.len; 2936 2937 if (ieee80211_radiotap_active_vap(vap)) { 2938 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2939 2940 tap->wt_flags = 0; 2941 tap->wt_rate = rate; 2942 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2943 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2944 2945 ieee80211_radiotap_tx(vap, m); 2946 } 2947 2948 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2949 if (type == IEEE80211_FC0_TYPE_MGT) { 2950 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2951 2952 /* Tell HW to set timestamp in probe responses. 
*/ 2953 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2954 flags |= WPI_TX_INSERT_TSTAMP; 2955 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2956 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2957 tx->timeout = htole16(3); 2958 else 2959 tx->timeout = htole16(2); 2960 } 2961 2962 if (k != NULL && !swcrypt) { 2963 switch (k->wk_cipher->ic_cipher) { 2964 case IEEE80211_CIPHER_AES_CCM: 2965 tx->security = WPI_CIPHER_CCMP; 2966 break; 2967 2968 default: 2969 break; 2970 } 2971 2972 memcpy(tx->key, k->wk_key, k->wk_keylen); 2973 } 2974 2975 tx->len = htole16(totlen); 2976 tx->flags = htole32(flags); 2977 tx->plcp = rate2plcp(rate); 2978 tx->id = WPI_ID_BROADCAST; 2979 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2980 tx->rts_ntries = params->ibp_try1; 2981 tx->data_ntries = params->ibp_try0; 2982 2983 tx_data.ni = ni; 2984 tx_data.m = m; 2985 tx_data.size = sizeof(struct wpi_cmd_data); 2986 tx_data.code = WPI_CMD_TX_DATA; 2987 tx_data.ac = ac; 2988 2989 return wpi_cmd2(sc, &tx_data); 2990 } 2991 2992 static __inline int 2993 wpi_tx_ring_is_full(struct wpi_softc *sc, int ac) 2994 { 2995 struct wpi_tx_ring *ring = &sc->txq[ac]; 2996 int retval; 2997 2998 WPI_TXQ_STATE_LOCK(sc); 2999 retval = (ring->queued > WPI_TX_RING_HIMARK); 3000 WPI_TXQ_STATE_UNLOCK(sc); 3001 3002 return retval; 3003 } 3004 3005 static __inline void 3006 wpi_handle_tx_failure(struct ieee80211_node *ni) 3007 { 3008 /* NB: m is reclaimed on tx failure */ 3009 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3010 ieee80211_free_node(ni); 3011 } 3012 3013 static int 3014 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3015 const struct ieee80211_bpf_params *params) 3016 { 3017 struct ieee80211com *ic = ni->ni_ic; 3018 struct wpi_softc *sc = ic->ic_softc; 3019 int ac, error = 0; 3020 3021 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3022 3023 ac = M_WME_GETAC(m); 3024 3025 WPI_TX_LOCK(sc); 3026 3027 if (sc->sc_running == 0 || wpi_tx_ring_is_full(sc, ac)) { 3028 m_freem(m); 3029 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3030 goto unlock; 3031 } 3032 3033 if (params == NULL) { 3034 /* 3035 * Legacy path; interpret frame contents to decide 3036 * precisely how to send the frame. 3037 */ 3038 error = wpi_tx_data(sc, m, ni); 3039 } else { 3040 /* 3041 * Caller supplied explicit parameters to use in 3042 * sending the frame. 3043 */ 3044 error = wpi_tx_data_raw(sc, m, ni, params); 3045 } 3046 3047 unlock: WPI_TX_UNLOCK(sc); 3048 3049 if (error != 0) { 3050 wpi_handle_tx_failure(ni); 3051 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3052 3053 return error; 3054 } 3055 3056 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3057 3058 return 0; 3059 } 3060 3061 static int 3062 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3063 { 3064 struct wpi_softc *sc = ic->ic_softc; 3065 struct ieee80211_node *ni; 3066 struct mbufq *sndq; 3067 int ac, error; 3068 3069 WPI_TX_LOCK(sc); 3070 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3071 3072 /* Check if interface is up & running. */ 3073 if (sc->sc_running == 0) { 3074 error = ENXIO; 3075 goto unlock; 3076 } 3077 3078 /* Check for available space. */ 3079 ac = M_WME_GETAC(m); 3080 sndq = &sc->txq[ac].snd; 3081 if (wpi_tx_ring_is_full(sc, ac) || mbufq_len(sndq) != 0) { 3082 /* wpi_tx_done() will dequeue it. 
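More precisely, wpi_tx_done() schedules sc_start_task once the ring drains below WPI_TX_RING_LOMARK, and wpi_start() then pulls frames off this queue.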
*/ 3083 error = mbufq_enqueue(sndq, m); 3084 goto unlock; 3085 } 3086 3087 error = 0; 3088 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3089 if (wpi_tx_data(sc, m, ni) != 0) { 3090 wpi_handle_tx_failure(ni); 3091 } 3092 3093 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3094 3095 unlock: WPI_TX_UNLOCK(sc); 3096 3097 return (error); 3098 } 3099 3100 /** 3101 * Process data waiting to be sent on the output queue 3102 */ 3103 static void 3104 wpi_start(void *arg0, int pending) 3105 { 3106 struct wpi_softc *sc = arg0; 3107 struct ieee80211_node *ni; 3108 struct mbuf *m; 3109 uint8_t i; 3110 3111 WPI_TX_LOCK(sc); 3112 if (sc->sc_running == 0) 3113 goto unlock; 3114 3115 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3116 3117 for (i = 0; i < WPI_CMD_QUEUE_NUM; i++) { 3118 struct mbufq *sndq = &sc->txq[i].snd; 3119 3120 for (;;) { 3121 if (wpi_tx_ring_is_full(sc, i)) 3122 break; 3123 3124 if ((m = mbufq_dequeue(sndq)) == NULL) 3125 break; 3126 3127 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3128 if (wpi_tx_data(sc, m, ni) != 0) { 3129 wpi_handle_tx_failure(ni); 3130 } 3131 } 3132 } 3133 3134 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3135 unlock: WPI_TX_UNLOCK(sc); 3136 } 3137 3138 static void 3139 wpi_watchdog_rfkill(void *arg) 3140 { 3141 struct wpi_softc *sc = arg; 3142 struct ieee80211com *ic = &sc->sc_ic; 3143 3144 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3145 3146 /* No need to lock firmware memory. */ 3147 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3148 /* Radio kill switch is still off. */ 3149 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3150 sc); 3151 } else 3152 ieee80211_runtask(ic, &sc->sc_radioon_task); 3153 } 3154 3155 static void 3156 wpi_scan_timeout(void *arg) 3157 { 3158 struct wpi_softc *sc = arg; 3159 struct ieee80211com *ic = &sc->sc_ic; 3160 3161 ic_printf(ic, "scan timeout\n"); 3162 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3163 } 3164 3165 static void 3166 wpi_tx_timeout(void *arg) 3167 { 3168 struct wpi_softc *sc = arg; 3169 struct ieee80211com *ic = &sc->sc_ic; 3170 3171 ic_printf(ic, "device timeout\n"); 3172 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3173 } 3174 3175 static void 3176 wpi_parent(struct ieee80211com *ic) 3177 { 3178 struct wpi_softc *sc = ic->ic_softc; 3179 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3180 3181 if (ic->ic_nrunning > 0) { 3182 if (wpi_init(sc) == 0) { 3183 ieee80211_notify_radio(ic, 1); 3184 ieee80211_start_all(ic); 3185 } else { 3186 ieee80211_notify_radio(ic, 0); 3187 ieee80211_stop(vap); 3188 } 3189 } else 3190 wpi_stop(sc); 3191 } 3192 3193 /* 3194 * Send a command to the firmware. 
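* Synchronous callers (async == 0) must hold the driver lock and are put to sleep on the command slot until wpi_cmd_done() issues the matching wakeup(); commands larger than the preallocated slot are bounced through a temporary mbuf.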
3195 */ 3196 static int 3197 wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3198 int async) 3199 { 3200 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3201 struct wpi_tx_desc *desc; 3202 struct wpi_tx_data *data; 3203 struct wpi_tx_cmd *cmd; 3204 struct mbuf *m; 3205 bus_addr_t paddr; 3206 int totlen, error; 3207 3208 WPI_TXQ_LOCK(sc); 3209 3210 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3211 3212 if (sc->sc_running == 0) { 3213 /* wpi_stop() was called */ 3214 if (code == WPI_CMD_SCAN) 3215 error = ENETDOWN; 3216 else 3217 error = 0; 3218 3219 goto fail; 3220 } 3221 3222 if (async == 0) 3223 WPI_LOCK_ASSERT(sc); 3224 3225 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3226 __func__, wpi_cmd_str(code), size, async); 3227 3228 desc = &ring->desc[ring->cur]; 3229 data = &ring->data[ring->cur]; 3230 totlen = 4 + size; 3231 3232 if (size > sizeof cmd->data) { 3233 /* Command is too large to fit in a descriptor. */ 3234 if (totlen > MCLBYTES) { 3235 error = EINVAL; 3236 goto fail; 3237 } 3238 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3239 if (m == NULL) { 3240 error = ENOMEM; 3241 goto fail; 3242 } 3243 cmd = mtod(m, struct wpi_tx_cmd *); 3244 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3245 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3246 if (error != 0) { 3247 m_freem(m); 3248 goto fail; 3249 } 3250 data->m = m; 3251 } else { 3252 cmd = &ring->cmd[ring->cur]; 3253 paddr = data->cmd_paddr; 3254 } 3255 3256 cmd->code = code; 3257 cmd->flags = 0; 3258 cmd->qid = ring->qid; 3259 cmd->idx = ring->cur; 3260 memcpy(cmd->data, buf, size); 3261 3262 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3263 desc->segs[0].addr = htole32(paddr); 3264 desc->segs[0].len = htole32(totlen); 3265 3266 if (size > sizeof cmd->data) { 3267 bus_dmamap_sync(ring->data_dmat, data->map, 3268 BUS_DMASYNC_PREWRITE); 3269 } else { 3270 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3271 BUS_DMASYNC_PREWRITE); 3272 } 3273 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3274 BUS_DMASYNC_PREWRITE); 3275 3276 /* Kick command ring. */ 3277 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3278 sc->sc_update_tx_ring(sc, ring); 3279 3280 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3281 3282 WPI_TXQ_UNLOCK(sc); 3283 3284 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3285 3286 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3287 3288 WPI_TXQ_UNLOCK(sc); 3289 3290 return error; 3291 } 3292 3293 /* 3294 * Configure HW multi-rate retries. 3295 */ 3296 static int 3297 wpi_mrr_setup(struct wpi_softc *sc) 3298 { 3299 struct ieee80211com *ic = &sc->sc_ic; 3300 struct wpi_mrr_setup mrr; 3301 int i, error; 3302 3303 /* CCK rates (not used with 802.11a). */ 3304 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3305 mrr.rates[i].flags = 0; 3306 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3307 /* Fallback to the immediate lower CCK rate (if any.) */ 3308 mrr.rates[i].next = 3309 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3310 /* Try twice at this rate before falling back to "next". */ 3311 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3312 } 3313 /* OFDM rates (not used with 802.11b). */ 3314 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3315 mrr.rates[i].flags = 0; 3316 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3317 /* Fallback to the immediate lower rate (if any.) */ 3318 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3319 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 
3320 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3321 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3322 i - 1; 3323 /* Try twice at this rate before falling back to "next". */ 3324 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3325 } 3326 /* Setup MRR for control frames. */ 3327 mrr.which = htole32(WPI_MRR_CTL); 3328 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3329 if (error != 0) { 3330 device_printf(sc->sc_dev, 3331 "could not setup MRR for control frames\n"); 3332 return error; 3333 } 3334 /* Setup MRR for data frames. */ 3335 mrr.which = htole32(WPI_MRR_DATA); 3336 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3337 if (error != 0) { 3338 device_printf(sc->sc_dev, 3339 "could not setup MRR for data frames\n"); 3340 return error; 3341 } 3342 return 0; 3343 } 3344 3345 static int 3346 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3347 { 3348 struct ieee80211com *ic = ni->ni_ic; 3349 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3350 struct wpi_node *wn = WPI_NODE(ni); 3351 struct wpi_node_info node; 3352 int error; 3353 3354 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3355 3356 if (wn->id == WPI_ID_UNDEFINED) 3357 return EINVAL; 3358 3359 memset(&node, 0, sizeof node); 3360 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3361 node.id = wn->id; 3362 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3363 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3364 node.action = htole32(WPI_ACTION_SET_RATE); 3365 node.antenna = WPI_ANTENNA_BOTH; 3366 3367 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3368 wn->id, ether_sprintf(ni->ni_macaddr)); 3369 3370 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3371 if (error != 0) { 3372 device_printf(sc->sc_dev, 3373 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3374 error); 3375 return error; 3376 } 3377 3378 if (wvp->wv_gtk != 0) { 3379 error = wpi_set_global_keys(ni); 3380 if (error != 0) { 3381 device_printf(sc->sc_dev, 3382 "%s: error while setting global keys\n", __func__); 3383 return ENXIO; 3384 } 3385 } 3386 3387 return 0; 3388 } 3389 3390 /* 3391 * Broadcast node is used to send group-addressed and management frames. 3392 */ 3393 static int 3394 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3395 { 3396 struct ieee80211com *ic = &sc->sc_ic; 3397 struct wpi_node_info node; 3398 3399 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3400 3401 memset(&node, 0, sizeof node); 3402 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3403 node.id = WPI_ID_BROADCAST; 3404 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3405 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3406 node.action = htole32(WPI_ACTION_SET_RATE); 3407 node.antenna = WPI_ANTENNA_BOTH; 3408 3409 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3410 3411 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3412 } 3413 3414 static int 3415 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3416 { 3417 struct wpi_node *wn = WPI_NODE(ni); 3418 int error; 3419 3420 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3421 3422 wn->id = wpi_add_node_entry_sta(sc); 3423 3424 if ((error = wpi_add_node(sc, ni)) != 0) { 3425 wpi_del_node_entry(sc, wn->id); 3426 wn->id = WPI_ID_UNDEFINED; 3427 return error; 3428 } 3429 3430 return 0; 3431 } 3432 3433 static int 3434 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3435 { 3436 struct wpi_node *wn = WPI_NODE(ni); 3437 int error; 3438 3439 KASSERT(wn->id == WPI_ID_UNDEFINED, 3440 ("the node %d was added before", wn->id)); 3441 3442 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3443 3444 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3445 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3446 return ENOMEM; 3447 } 3448 3449 if ((error = wpi_add_node(sc, ni)) != 0) { 3450 wpi_del_node_entry(sc, wn->id); 3451 wn->id = WPI_ID_UNDEFINED; 3452 return error; 3453 } 3454 3455 return 0; 3456 } 3457 3458 static void 3459 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3460 { 3461 struct wpi_node *wn = WPI_NODE(ni); 3462 struct wpi_cmd_del_node node; 3463 int error; 3464 3465 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3466 3467 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3468 3469 memset(&node, 0, sizeof node); 3470 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3471 node.count = 1; 3472 3473 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3474 wn->id, ether_sprintf(ni->ni_macaddr)); 3475 3476 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3477 if (error != 0) { 3478 device_printf(sc->sc_dev, 3479 "%s: could not delete node %u, error %d\n", __func__, 3480 wn->id, error); 3481 } 3482 } 3483 3484 static int 3485 wpi_updateedca(struct ieee80211com *ic) 3486 { 3487 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3488 struct wpi_softc *sc = ic->ic_softc; 3489 struct wpi_edca_params cmd; 3490 int aci, error; 3491 3492 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3493 3494 memset(&cmd, 0, sizeof cmd); 3495 cmd.flags = htole32(WPI_EDCA_UPDATE); 3496 for (aci = 0; aci < WME_NUM_AC; aci++) { 3497 const struct wmeParams *ac = 3498 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3499 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3500 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3501 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3502 cmd.ac[aci].txoplimit = 3503 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3504 3505 DPRINTF(sc, WPI_DEBUG_EDCA, 3506 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3507 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3508 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3509 cmd.ac[aci].txoplimit); 3510 } 3511 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3512 3513 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3514 3515 return error; 3516 #undef WPI_EXP2 3517 } 3518 3519 static void 3520 wpi_set_promisc(struct wpi_softc *sc) 3521 { 3522 struct ieee80211com *ic = &sc->sc_ic; 3523 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3524 uint32_t promisc_filter; 3525 3526 promisc_filter = WPI_FILTER_CTL; 3527 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3528 promisc_filter |= WPI_FILTER_PROMISC; 3529 3530 if (ic->ic_promisc > 0) 3531 sc->rxon.filter |= htole32(promisc_filter); 3532 else 3533 sc->rxon.filter &= ~htole32(promisc_filter); 3534 } 3535 3536 static void 3537 wpi_update_promisc(struct ieee80211com *ic) 3538 { 3539 struct wpi_softc *sc = ic->ic_softc; 3540 3541 WPI_RXON_LOCK(sc); 3542 wpi_set_promisc(sc); 3543 3544 if (wpi_send_rxon(sc, 1, 1) != 0) { 3545 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3546 __func__); 3547 } 3548 WPI_RXON_UNLOCK(sc); 3549 } 3550 3551 static void 3552 wpi_update_mcast(struct ieee80211com *ic) 3553 { 3554 /* Ignore */ 3555 } 3556 3557 static void 3558 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3559 { 3560 struct wpi_cmd_led led; 3561 3562 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3563 3564 led.which = which; 3565 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3566 led.off = off; 3567 led.on = on; 3568 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3569 } 3570 3571 static int 3572 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3573 { 3574 struct wpi_cmd_timing cmd; 3575 uint64_t val, mod; 3576 3577 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3578 3579 memset(&cmd, 0, sizeof cmd); 3580 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3581 cmd.bintval = htole16(ni->ni_intval); 3582 cmd.lintval = htole16(10); 3583 3584 /* Compute remaining time until next beacon. */ 3585 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3586 mod = le64toh(cmd.tstamp) % val; 3587 cmd.binitval = htole32((uint32_t)(val - mod)); 3588 3589 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3590 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3591 3592 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3593 } 3594 3595 /* 3596 * This function is called periodically (every 60 seconds) to adjust output 3597 * power to temperature changes. 3598 */ 3599 static void 3600 wpi_power_calibration(struct wpi_softc *sc) 3601 { 3602 int temp; 3603 3604 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3605 3606 /* Update sensor data. */ 3607 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3608 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3609 3610 /* Sanity-check read value. */ 3611 if (temp < -260 || temp > 25) { 3612 /* This can't be correct, ignore. */ 3613 DPRINTF(sc, WPI_DEBUG_TEMP, 3614 "out-of-range temperature reported: %d\n", temp); 3615 return; 3616 } 3617 3618 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3619 3620 /* Adjust Tx power if need be. */ 3621 if (abs(temp - sc->temp) <= 6) 3622 return; 3623 3624 sc->temp = temp; 3625 3626 if (wpi_set_txpower(sc, 1) != 0) { 3627 /* just warn, too bad for the automatic calibration... */ 3628 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3629 } 3630 } 3631 3632 /* 3633 * Set TX power for current channel. 3634 */ 3635 static int 3636 wpi_set_txpower(struct wpi_softc *sc, int async) 3637 { 3638 struct wpi_power_group *group; 3639 struct wpi_cmd_txpower cmd; 3640 uint8_t chan; 3641 int idx, is_chan_5ghz, i; 3642 3643 /* Retrieve current channel from last RXON. 
*/ 3644 chan = sc->rxon.chan; 3645 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3646 3647 /* Find the TX power group to which this channel belongs. */ 3648 if (is_chan_5ghz) { 3649 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3650 if (chan <= group->chan) 3651 break; 3652 } else 3653 group = &sc->groups[0]; 3654 3655 memset(&cmd, 0, sizeof cmd); 3656 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3657 cmd.chan = htole16(chan); 3658 3659 /* Set TX power for all OFDM and CCK rates. */ 3660 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3661 /* Retrieve TX power for this channel/rate. */ 3662 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3663 3664 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3665 3666 if (is_chan_5ghz) { 3667 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3668 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3669 } else { 3670 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3671 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3672 } 3673 DPRINTF(sc, WPI_DEBUG_TEMP, 3674 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3675 } 3676 3677 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3678 } 3679 3680 /* 3681 * Determine Tx power index for a given channel/rate combination. 3682 * This takes into account the regulatory information from EEPROM and the 3683 * current temperature. 3684 */ 3685 static int 3686 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3687 uint8_t chan, int is_chan_5ghz, int ridx) 3688 { 3689 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3690 #define fdivround(a, b, n) \ 3691 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3692 3693 /* Linear interpolation. */ 3694 #define interpolate(x, x1, y1, x2, y2, n) \ 3695 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3696 3697 struct wpi_power_sample *sample; 3698 int pwr, idx; 3699 3700 /* Default TX power is group maximum TX power minus 3dB. */ 3701 pwr = group->maxpwr / 2; 3702 3703 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3704 switch (ridx) { 3705 case WPI_RIDX_OFDM36: 3706 pwr -= is_chan_5ghz ? 5 : 0; 3707 break; 3708 case WPI_RIDX_OFDM48: 3709 pwr -= is_chan_5ghz ? 10 : 7; 3710 break; 3711 case WPI_RIDX_OFDM54: 3712 pwr -= is_chan_5ghz ? 12 : 9; 3713 break; 3714 } 3715 3716 /* Never exceed the channel maximum allowed TX power. */ 3717 pwr = min(pwr, sc->maxpwr[chan]); 3718 3719 /* Retrieve TX power index into gain tables from samples. */ 3720 for (sample = group->samples; sample < &group->samples[3]; sample++) 3721 if (pwr > sample[1].power) 3722 break; 3723 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3724 idx = interpolate(pwr, sample[0].power, sample[0].index, 3725 sample[1].power, sample[1].index, 19); 3726 3727 /*- 3728 * Adjust power index based on current temperature: 3729 * - if cooler than factory-calibrated: decrease output power 3730 * - if warmer than factory-calibrated: increase output power 3731 */ 3732 idx -= (sc->temp - group->temp) * 11 / 100; 3733 3734 /* Decrease TX power for CCK rates (-5dB). */ 3735 if (ridx >= WPI_RIDX_CCK1) 3736 idx += 10; 3737 3738 /* Make sure idx stays in a valid range. */ 3739 if (idx < 0) 3740 return 0; 3741 if (idx > WPI_MAX_PWR_INDEX) 3742 return WPI_MAX_PWR_INDEX; 3743 return idx; 3744 3745 #undef interpolate 3746 #undef fdivround 3747 } 3748 3749 /* 3750 * Set STA mode power saving level (between 0 and 5). 
3751 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3752 */ 3753 static int 3754 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3755 { 3756 struct wpi_pmgt_cmd cmd; 3757 const struct wpi_pmgt *pmgt; 3758 uint32_t max, skip_dtim; 3759 uint32_t reg; 3760 int i; 3761 3762 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3763 "%s: dtim=%d, level=%d, async=%d\n", 3764 __func__, dtim, level, async); 3765 3766 /* Select which PS parameters to use. */ 3767 if (dtim <= 10) 3768 pmgt = &wpi_pmgt[0][level]; 3769 else 3770 pmgt = &wpi_pmgt[1][level]; 3771 3772 memset(&cmd, 0, sizeof cmd); 3773 WPI_TXQ_LOCK(sc); 3774 if (level != 0) { /* not CAM */ 3775 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3776 sc->sc_flags |= WPI_PS_PATH; 3777 } else 3778 sc->sc_flags &= ~WPI_PS_PATH; 3779 WPI_TXQ_UNLOCK(sc); 3780 /* Retrieve PCIe Active State Power Management (ASPM). */ 3781 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3782 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3783 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3784 3785 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3786 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3787 3788 if (dtim == 0) { 3789 dtim = 1; 3790 skip_dtim = 0; 3791 } else 3792 skip_dtim = pmgt->skip_dtim; 3793 3794 if (skip_dtim != 0) { 3795 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3796 max = pmgt->intval[4]; 3797 if (max == (uint32_t)-1) 3798 max = dtim * (skip_dtim + 1); 3799 else if (max > dtim) 3800 max = (max / dtim) * dtim; 3801 } else 3802 max = dtim; 3803 3804 for (i = 0; i < 5; i++) 3805 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3806 3807 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3808 } 3809 3810 static int 3811 wpi_send_btcoex(struct wpi_softc *sc) 3812 { 3813 struct wpi_bluetooth cmd; 3814 3815 memset(&cmd, 0, sizeof cmd); 3816 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3817 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3818 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3819 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3820 __func__); 3821 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3822 } 3823 3824 static int 3825 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3826 { 3827 int error; 3828 3829 if (async) 3830 WPI_RXON_LOCK_ASSERT(sc); 3831 3832 if (assoc && wpi_check_bss_filter(sc) != 0) { 3833 struct wpi_assoc rxon_assoc; 3834 3835 rxon_assoc.flags = sc->rxon.flags; 3836 rxon_assoc.filter = sc->rxon.filter; 3837 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3838 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3839 rxon_assoc.reserved = 0; 3840 3841 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3842 sizeof (struct wpi_assoc), async); 3843 if (error != 0) { 3844 device_printf(sc->sc_dev, 3845 "RXON_ASSOC command failed, error %d\n", error); 3846 return error; 3847 } 3848 } else { 3849 if (async) { 3850 WPI_NT_LOCK(sc); 3851 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3852 sizeof (struct wpi_rxon), async); 3853 if (error == 0) 3854 wpi_clear_node_table(sc); 3855 WPI_NT_UNLOCK(sc); 3856 } else { 3857 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3858 sizeof (struct wpi_rxon), async); 3859 if (error == 0) 3860 wpi_clear_node_table(sc); 3861 } 3862 3863 if (error != 0) { 3864 device_printf(sc->sc_dev, 3865 "RXON command failed, error %d\n", error); 3866 return error; 3867 } 3868 3869 /* Add broadcast node. 
*/ 3870 error = wpi_add_broadcast_node(sc, async); 3871 if (error != 0) { 3872 device_printf(sc->sc_dev, 3873 "could not add broadcast node, error %d\n", error); 3874 return error; 3875 } 3876 } 3877 3878 /* Configuration has changed, set Tx power accordingly. */ 3879 if ((error = wpi_set_txpower(sc, async)) != 0) { 3880 device_printf(sc->sc_dev, 3881 "%s: could not set TX power, error %d\n", __func__, error); 3882 return error; 3883 } 3884 3885 return 0; 3886 } 3887 3888 /** 3889 * Configure the card to listen on a particular channel; this transitions the 3890 * card into a state where it can receive frames from remote devices. 3891 */ 3892 static int 3893 wpi_config(struct wpi_softc *sc) 3894 { 3895 struct ieee80211com *ic = &sc->sc_ic; 3896 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3897 struct ieee80211_channel *c = ic->ic_curchan; 3898 int error; 3899 3900 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3901 3902 /* Set power saving level to CAM during initialization. */ 3903 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3904 device_printf(sc->sc_dev, 3905 "%s: could not set power saving level\n", __func__); 3906 return error; 3907 } 3908 3909 /* Configure bluetooth coexistence. */ 3910 if ((error = wpi_send_btcoex(sc)) != 0) { 3911 device_printf(sc->sc_dev, 3912 "could not configure bluetooth coexistence\n"); 3913 return error; 3914 } 3915 3916 /* Configure adapter. */ 3917 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3918 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3919 3920 /* Set default channel. */ 3921 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3922 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3923 if (IEEE80211_IS_CHAN_2GHZ(c)) 3924 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3925 3926 sc->rxon.filter = WPI_FILTER_MULTICAST; 3927 switch (ic->ic_opmode) { 3928 case IEEE80211_M_STA: 3929 sc->rxon.mode = WPI_MODE_STA; 3930 break; 3931 case IEEE80211_M_IBSS: 3932 sc->rxon.mode = WPI_MODE_IBSS; 3933 sc->rxon.filter |= WPI_FILTER_BEACON; 3934 break; 3935 case IEEE80211_M_HOSTAP: 3936 /* XXX workaround for beaconing */ 3937 sc->rxon.mode = WPI_MODE_IBSS; 3938 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3939 break; 3940 case IEEE80211_M_AHDEMO: 3941 sc->rxon.mode = WPI_MODE_HOSTAP; 3942 break; 3943 case IEEE80211_M_MONITOR: 3944 sc->rxon.mode = WPI_MODE_MONITOR; 3945 break; 3946 default: 3947 device_printf(sc->sc_dev, "unknown opmode %d\n", 3948 ic->ic_opmode); 3949 return EINVAL; 3950 } 3951 sc->rxon.filter = htole32(sc->rxon.filter); 3952 wpi_set_promisc(sc); 3953 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3954 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3955 3956 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3957 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3958 __func__); 3959 return error; 3960 } 3961 3962 /* Set up rate scaling. */ 3963 if ((error = wpi_mrr_setup(sc)) != 0) { 3964 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3965 error); 3966 return error; 3967 } 3968 3969 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3970 3971 return 0; 3972 } 3973 3974 static uint16_t 3975 wpi_get_active_dwell_time(struct wpi_softc *sc, 3976 struct ieee80211_channel *c, uint8_t n_probes) 3977 { 3978 /* No channel? Default to 2GHz settings. */ 3979 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3980 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3981 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3982 } 3983 3984 /* 5GHz dwell time. 
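Same linear rule as the 2GHz case: a base dwell plus a per-probe factor scaled by (n_probes + 1).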
*/ 3985 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 3986 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 3987 } 3988 3989 /* 3990 * Limit the total dwell time. 3991 * 3992 * Returns the dwell time in milliseconds. 3993 */ 3994 static uint16_t 3995 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 3996 { 3997 struct ieee80211com *ic = &sc->sc_ic; 3998 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3999 int bintval = 0; 4000 4001 /* bintval is in TU (1.024mS) */ 4002 if (vap != NULL) 4003 bintval = vap->iv_bss->ni_intval; 4004 4005 /* 4006 * If it's non-zero, we should calculate the minimum of 4007 * it and the DWELL_BASE. 4008 * 4009 * XXX Yes, the math should take into account that bintval 4010 * is 1.024mS, not 1mS.. 4011 */ 4012 if (bintval > 0) { 4013 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4014 bintval); 4015 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4016 } 4017 4018 /* No association context? Default. */ 4019 return dwell_time; 4020 } 4021 4022 static uint16_t 4023 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4024 { 4025 uint16_t passive; 4026 4027 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4028 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4029 else 4030 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4031 4032 /* Clamp to the beacon interval if we're associated. */ 4033 return (wpi_limit_dwell(sc, passive)); 4034 } 4035 4036 static uint32_t 4037 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4038 { 4039 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4040 uint32_t nbeacons = time / bintval; 4041 4042 if (mod > WPI_PAUSE_MAX_TIME) 4043 mod = WPI_PAUSE_MAX_TIME; 4044 4045 return WPI_PAUSE_SCAN(nbeacons, mod); 4046 } 4047 4048 /* 4049 * Send a scan request to the firmware. 4050 */ 4051 static int 4052 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4053 { 4054 struct ieee80211com *ic = &sc->sc_ic; 4055 struct ieee80211_scan_state *ss = ic->ic_scan; 4056 struct ieee80211vap *vap = ss->ss_vap; 4057 struct wpi_scan_hdr *hdr; 4058 struct wpi_cmd_data *tx; 4059 struct wpi_scan_essid *essids; 4060 struct wpi_scan_chan *chan; 4061 struct ieee80211_frame *wh; 4062 struct ieee80211_rateset *rs; 4063 uint16_t dwell_active, dwell_passive; 4064 uint8_t *buf, *frm; 4065 int bgscan, bintval, buflen, error, i, nssid; 4066 4067 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4068 4069 /* 4070 * We are absolutely not allowed to send a scan command when another 4071 * scan command is pending. 4072 */ 4073 if (callout_pending(&sc->scan_timeout)) { 4074 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4075 __func__); 4076 error = EAGAIN; 4077 goto fail; 4078 } 4079 4080 bgscan = wpi_check_bss_filter(sc); 4081 bintval = vap->iv_bss->ni_intval; 4082 if (bgscan != 0 && 4083 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4084 error = EOPNOTSUPP; 4085 goto fail; 4086 } 4087 4088 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4089 if (buf == NULL) { 4090 device_printf(sc->sc_dev, 4091 "%s: could not allocate buffer for scan command\n", 4092 __func__); 4093 error = ENOMEM; 4094 goto fail; 4095 } 4096 hdr = (struct wpi_scan_hdr *)buf; 4097 4098 /* 4099 * Move to the next channel if no packets are received within 10 msecs 4100 * after sending the probe request. 
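* quiet_time carries that threshold, and quiet_threshold appears to be the
* minimum number of received frames needed to keep dwelling on the channel
* (a single frame here).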
4101 */ 4102 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4103 hdr->quiet_threshold = htole16(1); 4104 4105 if (bgscan != 0) { 4106 /* 4107 * Max needs to be greater than active and passive and quiet! 4108 * It's also in microseconds! 4109 */ 4110 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4111 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4112 bintval)); 4113 } 4114 4115 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4116 4117 tx = (struct wpi_cmd_data *)(hdr + 1); 4118 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4119 tx->id = WPI_ID_BROADCAST; 4120 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4121 4122 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4123 /* Send probe requests at 6Mbps. */ 4124 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4125 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4126 } else { 4127 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4128 /* Send probe requests at 1Mbps. */ 4129 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4130 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4131 } 4132 4133 essids = (struct wpi_scan_essid *)(tx + 1); 4134 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4135 for (i = 0; i < nssid; i++) { 4136 essids[i].id = IEEE80211_ELEMID_SSID; 4137 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4138 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4139 #ifdef WPI_DEBUG 4140 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4141 printf("Scanning Essid: "); 4142 ieee80211_print_essid(essids[i].data, essids[i].len); 4143 printf("\n"); 4144 } 4145 #endif 4146 } 4147 4148 /* 4149 * Build a probe request frame. Most of the following code is a 4150 * copy & paste of what is done in net80211. 4151 */ 4152 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4153 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4154 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4155 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4156 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4157 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4158 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4159 4160 frm = (uint8_t *)(wh + 1); 4161 frm = ieee80211_add_ssid(frm, NULL, 0); 4162 frm = ieee80211_add_rates(frm, rs); 4163 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4164 frm = ieee80211_add_xrates(frm, rs); 4165 4166 /* Set length of probe request. */ 4167 tx->len = htole16(frm - (uint8_t *)wh); 4168 4169 /* 4170 * Construct information about the channel that we 4171 * want to scan. The firmware expects this to be directly 4172 * after the scan probe request 4173 */ 4174 chan = (struct wpi_scan_chan *)frm; 4175 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4176 chan->flags = 0; 4177 if (nssid) { 4178 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4179 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4180 } else 4181 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4182 4183 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4184 chan->flags |= WPI_CHAN_ACTIVE; 4185 4186 /* 4187 * Calculate the active/passive dwell times. 4188 */ 4189 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4190 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4191 4192 /* Make sure they're valid. 
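In practice this means the active dwell must never exceed the passive dwell; clamp it below if the calculation above produced a larger value.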
*/ 4193 if (dwell_active > dwell_passive) 4194 dwell_active = dwell_passive; 4195 4196 chan->active = htole16(dwell_active); 4197 chan->passive = htole16(dwell_passive); 4198 4199 chan->dsp_gain = 0x6e; /* Default level */ 4200 4201 if (IEEE80211_IS_CHAN_5GHZ(c)) 4202 chan->rf_gain = 0x3b; 4203 else 4204 chan->rf_gain = 0x28; 4205 4206 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4207 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4208 4209 hdr->nchan++; 4210 4211 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4212 /* XXX Force probe request transmission. */ 4213 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4214 4215 chan++; 4216 4217 /* Reduce unnecessary delay. */ 4218 chan->flags = 0; 4219 chan->passive = chan->active = hdr->quiet_time; 4220 4221 hdr->nchan++; 4222 } 4223 4224 chan++; 4225 4226 buflen = (uint8_t *)chan - buf; 4227 hdr->len = htole16(buflen); 4228 4229 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4230 hdr->nchan); 4231 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4232 free(buf, M_DEVBUF); 4233 4234 if (error != 0) 4235 goto fail; 4236 4237 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4238 4239 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4240 4241 return 0; 4242 4243 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4244 4245 return error; 4246 } 4247 4248 static int 4249 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4250 { 4251 struct ieee80211com *ic = vap->iv_ic; 4252 struct ieee80211_node *ni = vap->iv_bss; 4253 struct ieee80211_channel *c = ni->ni_chan; 4254 int error; 4255 4256 WPI_RXON_LOCK(sc); 4257 4258 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4259 4260 /* Update adapter configuration. */ 4261 sc->rxon.associd = 0; 4262 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4263 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4264 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4265 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4266 if (IEEE80211_IS_CHAN_2GHZ(c)) 4267 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4268 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4269 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4270 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4271 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4272 if (IEEE80211_IS_CHAN_A(c)) { 4273 sc->rxon.cck_mask = 0; 4274 sc->rxon.ofdm_mask = 0x15; 4275 } else if (IEEE80211_IS_CHAN_B(c)) { 4276 sc->rxon.cck_mask = 0x03; 4277 sc->rxon.ofdm_mask = 0; 4278 } else { 4279 /* Assume 802.11b/g. 
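With the usual rate-index ordering, 0x0f enables the four CCK rates (1/2/5.5/11 Mb/s) and 0x15 the mandatory OFDM rates (6/12/24 Mb/s).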
*/ 4280 sc->rxon.cck_mask = 0x0f; 4281 sc->rxon.ofdm_mask = 0x15; 4282 } 4283 4284 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4285 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4286 sc->rxon.ofdm_mask); 4287 4288 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4289 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4290 __func__); 4291 } 4292 4293 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4294 4295 WPI_RXON_UNLOCK(sc); 4296 4297 return error; 4298 } 4299 4300 static int 4301 wpi_config_beacon(struct wpi_vap *wvp) 4302 { 4303 struct ieee80211vap *vap = &wvp->wv_vap; 4304 struct ieee80211com *ic = vap->iv_ic; 4305 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4306 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4307 struct wpi_softc *sc = ic->ic_softc; 4308 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4309 struct ieee80211_tim_ie *tie; 4310 struct mbuf *m; 4311 uint8_t *ptr; 4312 int error; 4313 4314 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4315 4316 WPI_VAP_LOCK_ASSERT(wvp); 4317 4318 cmd->len = htole16(bcn->m->m_pkthdr.len); 4319 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4320 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4321 4322 /* XXX seems to be unused */ 4323 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4324 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4325 ptr = mtod(bcn->m, uint8_t *); 4326 4327 cmd->tim = htole16(bo->bo_tim - ptr); 4328 cmd->timsz = tie->tim_len; 4329 } 4330 4331 /* Necessary for recursion in ieee80211_beacon_update(). */ 4332 m = bcn->m; 4333 bcn->m = m_dup(m, M_NOWAIT); 4334 if (bcn->m == NULL) { 4335 device_printf(sc->sc_dev, 4336 "%s: could not copy beacon frame\n", __func__); 4337 error = ENOMEM; 4338 goto end; 4339 } 4340 4341 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4342 device_printf(sc->sc_dev, 4343 "%s: could not update beacon frame, error %d", __func__, 4344 error); 4345 } 4346 4347 /* Restore mbuf. 
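The duplicate handed to wpi_cmd2() above belongs to the Tx path from that point on, so put the original beacon back for later ieee80211_beacon_update() calls.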
*/ 4348 end: bcn->m = m; 4349 4350 return error; 4351 } 4352 4353 static int 4354 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4355 { 4356 struct ieee80211vap *vap = ni->ni_vap; 4357 struct wpi_vap *wvp = WPI_VAP(vap); 4358 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4359 struct mbuf *m; 4360 int error; 4361 4362 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4363 4364 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4365 return EINVAL; 4366 4367 m = ieee80211_beacon_alloc(ni); 4368 if (m == NULL) { 4369 device_printf(sc->sc_dev, 4370 "%s: could not allocate beacon frame\n", __func__); 4371 return ENOMEM; 4372 } 4373 4374 WPI_VAP_LOCK(wvp); 4375 if (bcn->m != NULL) 4376 m_freem(bcn->m); 4377 4378 bcn->m = m; 4379 4380 error = wpi_config_beacon(wvp); 4381 WPI_VAP_UNLOCK(wvp); 4382 4383 return error; 4384 } 4385 4386 static void 4387 wpi_update_beacon(struct ieee80211vap *vap, int item) 4388 { 4389 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4390 struct wpi_vap *wvp = WPI_VAP(vap); 4391 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4392 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4393 struct ieee80211_node *ni = vap->iv_bss; 4394 int mcast = 0; 4395 4396 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4397 4398 WPI_VAP_LOCK(wvp); 4399 if (bcn->m == NULL) { 4400 bcn->m = ieee80211_beacon_alloc(ni); 4401 if (bcn->m == NULL) { 4402 device_printf(sc->sc_dev, 4403 "%s: could not allocate beacon frame\n", __func__); 4404 4405 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4406 __func__); 4407 4408 WPI_VAP_UNLOCK(wvp); 4409 return; 4410 } 4411 } 4412 WPI_VAP_UNLOCK(wvp); 4413 4414 if (item == IEEE80211_BEACON_TIM) 4415 mcast = 1; /* TODO */ 4416 4417 setbit(bo->bo_flags, item); 4418 ieee80211_beacon_update(ni, bcn->m, mcast); 4419 4420 WPI_VAP_LOCK(wvp); 4421 wpi_config_beacon(wvp); 4422 WPI_VAP_UNLOCK(wvp); 4423 4424 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4425 } 4426 4427 static void 4428 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4429 { 4430 struct ieee80211vap *vap = ni->ni_vap; 4431 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4432 struct wpi_node *wn = WPI_NODE(ni); 4433 int error; 4434 4435 WPI_NT_LOCK(sc); 4436 4437 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4438 4439 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4440 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4441 device_printf(sc->sc_dev, 4442 "%s: could not add IBSS node, error %d\n", 4443 __func__, error); 4444 } 4445 } 4446 WPI_NT_UNLOCK(sc); 4447 } 4448 4449 static int 4450 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4451 { 4452 struct ieee80211com *ic = vap->iv_ic; 4453 struct ieee80211_node *ni = vap->iv_bss; 4454 struct ieee80211_channel *c = ni->ni_chan; 4455 int error; 4456 4457 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4458 4459 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4460 /* Link LED blinks while monitoring. */ 4461 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4462 return 0; 4463 } 4464 4465 /* XXX kernel panic workaround */ 4466 if (c == IEEE80211_CHAN_ANYC) { 4467 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4468 __func__); 4469 return EINVAL; 4470 } 4471 4472 if ((error = wpi_set_timing(sc, ni)) != 0) { 4473 device_printf(sc->sc_dev, 4474 "%s: could not set timing, error %d\n", __func__, error); 4475 return error; 4476 } 4477 4478 /* Update adapter configuration. 
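This mirrors the settings programmed in wpi_auth(), now with the association ID filled in and the BSS filter enabled.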
*/ 4479 WPI_RXON_LOCK(sc); 4480 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4481 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4482 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4483 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4484 if (IEEE80211_IS_CHAN_2GHZ(c)) 4485 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4486 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4487 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4488 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4489 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4490 if (IEEE80211_IS_CHAN_A(c)) { 4491 sc->rxon.cck_mask = 0; 4492 sc->rxon.ofdm_mask = 0x15; 4493 } else if (IEEE80211_IS_CHAN_B(c)) { 4494 sc->rxon.cck_mask = 0x03; 4495 sc->rxon.ofdm_mask = 0; 4496 } else { 4497 /* Assume 802.11b/g. */ 4498 sc->rxon.cck_mask = 0x0f; 4499 sc->rxon.ofdm_mask = 0x15; 4500 } 4501 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4502 4503 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4504 sc->rxon.chan, sc->rxon.flags); 4505 4506 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4507 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4508 __func__); 4509 return error; 4510 } 4511 4512 /* Start periodic calibration timer. */ 4513 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4514 4515 WPI_RXON_UNLOCK(sc); 4516 4517 if (vap->iv_opmode == IEEE80211_M_IBSS || 4518 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4519 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4520 device_printf(sc->sc_dev, 4521 "%s: could not setup beacon, error %d\n", __func__, 4522 error); 4523 return error; 4524 } 4525 } 4526 4527 if (vap->iv_opmode == IEEE80211_M_STA) { 4528 /* Add BSS node. */ 4529 WPI_NT_LOCK(sc); 4530 error = wpi_add_sta_node(sc, ni); 4531 WPI_NT_UNLOCK(sc); 4532 if (error != 0) { 4533 device_printf(sc->sc_dev, 4534 "%s: could not add BSS node, error %d\n", __func__, 4535 error); 4536 return error; 4537 } 4538 } 4539 4540 /* Link LED always on while associated. */ 4541 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4542 4543 /* Enable power-saving mode if requested by user. 
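Level 3 sits in the middle of the 0 (CAM) to 5 (maximum saving) range handled by wpi_set_pslevel().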
*/ 4544 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4545 vap->iv_opmode != IEEE80211_M_IBSS) 4546 (void)wpi_set_pslevel(sc, 0, 3, 1); 4547 4548 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4549 4550 return 0; 4551 } 4552 4553 static int 4554 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4555 { 4556 const struct ieee80211_cipher *cip = k->wk_cipher; 4557 struct ieee80211vap *vap = ni->ni_vap; 4558 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4559 struct wpi_node *wn = WPI_NODE(ni); 4560 struct wpi_node_info node; 4561 uint16_t kflags; 4562 int error; 4563 4564 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4565 4566 if (wpi_check_node_entry(sc, wn->id) == 0) { 4567 device_printf(sc->sc_dev, "%s: node does not exist\n", 4568 __func__); 4569 return 0; 4570 } 4571 4572 switch (cip->ic_cipher) { 4573 case IEEE80211_CIPHER_AES_CCM: 4574 kflags = WPI_KFLAG_CCMP; 4575 break; 4576 4577 default: 4578 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4579 cip->ic_cipher); 4580 return 0; 4581 } 4582 4583 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4584 if (k->wk_flags & IEEE80211_KEY_GROUP) 4585 kflags |= WPI_KFLAG_MULTICAST; 4586 4587 memset(&node, 0, sizeof node); 4588 node.id = wn->id; 4589 node.control = WPI_NODE_UPDATE; 4590 node.flags = WPI_FLAG_KEY_SET; 4591 node.kflags = htole16(kflags); 4592 memcpy(node.key, k->wk_key, k->wk_keylen); 4593 again: 4594 DPRINTF(sc, WPI_DEBUG_KEY, 4595 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4596 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4597 node.id, ether_sprintf(ni->ni_macaddr)); 4598 4599 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4600 if (error != 0) { 4601 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4602 error); 4603 return !error; 4604 } 4605 4606 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4607 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4608 kflags |= WPI_KFLAG_MULTICAST; 4609 node.kflags = htole16(kflags); 4610 4611 goto again; 4612 } 4613 4614 return 1; 4615 } 4616 4617 static void 4618 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4619 { 4620 const struct ieee80211_key *k = arg; 4621 struct ieee80211vap *vap = ni->ni_vap; 4622 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4623 struct wpi_node *wn = WPI_NODE(ni); 4624 int error; 4625 4626 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4627 return; 4628 4629 WPI_NT_LOCK(sc); 4630 error = wpi_load_key(ni, k); 4631 WPI_NT_UNLOCK(sc); 4632 4633 if (error == 0) { 4634 device_printf(sc->sc_dev, "%s: error while setting key\n", 4635 __func__); 4636 } 4637 } 4638 4639 static int 4640 wpi_set_global_keys(struct ieee80211_node *ni) 4641 { 4642 struct ieee80211vap *vap = ni->ni_vap; 4643 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4644 int error = 1; 4645 4646 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4647 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4648 error = wpi_load_key(ni, wk); 4649 4650 return !error; 4651 } 4652 4653 static int 4654 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4655 { 4656 struct ieee80211vap *vap = ni->ni_vap; 4657 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4658 struct wpi_node *wn = WPI_NODE(ni); 4659 struct wpi_node_info node; 4660 uint16_t kflags; 4661 int error; 4662 4663 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4664 4665 if (wpi_check_node_entry(sc, wn->id) == 0) { 4666 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4667 return 1; /* 
Nothing to do. */ 4668 } 4669 4670 kflags = WPI_KFLAG_KID(k->wk_keyix); 4671 if (k->wk_flags & IEEE80211_KEY_GROUP) 4672 kflags |= WPI_KFLAG_MULTICAST; 4673 4674 memset(&node, 0, sizeof node); 4675 node.id = wn->id; 4676 node.control = WPI_NODE_UPDATE; 4677 node.flags = WPI_FLAG_KEY_SET; 4678 node.kflags = htole16(kflags); 4679 again: 4680 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4681 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4682 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4683 4684 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4685 if (error != 0) { 4686 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4687 error); 4688 return !error; 4689 } 4690 4691 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4692 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4693 kflags |= WPI_KFLAG_MULTICAST; 4694 node.kflags = htole16(kflags); 4695 4696 goto again; 4697 } 4698 4699 return 1; 4700 } 4701 4702 static void 4703 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4704 { 4705 const struct ieee80211_key *k = arg; 4706 struct ieee80211vap *vap = ni->ni_vap; 4707 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4708 struct wpi_node *wn = WPI_NODE(ni); 4709 int error; 4710 4711 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4712 return; 4713 4714 WPI_NT_LOCK(sc); 4715 error = wpi_del_key(ni, k); 4716 WPI_NT_UNLOCK(sc); 4717 4718 if (error == 0) { 4719 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4720 __func__); 4721 } 4722 } 4723 4724 static int 4725 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4726 int set) 4727 { 4728 struct ieee80211com *ic = vap->iv_ic; 4729 struct wpi_softc *sc = ic->ic_softc; 4730 struct wpi_vap *wvp = WPI_VAP(vap); 4731 struct ieee80211_node *ni; 4732 int error, ni_ref = 0; 4733 4734 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4735 4736 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4737 /* Not for us. */ 4738 return 1; 4739 } 4740 4741 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4742 /* XMIT keys are handled in wpi_tx_data(). */ 4743 return 1; 4744 } 4745 4746 /* Handle group keys. */ 4747 if (&vap->iv_nw_keys[0] <= k && 4748 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4749 WPI_NT_LOCK(sc); 4750 if (set) 4751 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4752 else 4753 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4754 WPI_NT_UNLOCK(sc); 4755 4756 if (vap->iv_state == IEEE80211_S_RUN) { 4757 ieee80211_iterate_nodes(&ic->ic_sta, 4758 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4759 __DECONST(void *, k)); 4760 } 4761 4762 return 1; 4763 } 4764 4765 switch (vap->iv_opmode) { 4766 case IEEE80211_M_STA: 4767 ni = vap->iv_bss; 4768 break; 4769 4770 case IEEE80211_M_IBSS: 4771 case IEEE80211_M_AHDEMO: 4772 case IEEE80211_M_HOSTAP: 4773 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4774 if (ni == NULL) 4775 return 0; /* should not happen */ 4776 4777 ni_ref = 1; 4778 break; 4779 4780 default: 4781 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4782 vap->iv_opmode); 4783 return 0; 4784 } 4785 4786 WPI_NT_LOCK(sc); 4787 if (set) 4788 error = wpi_load_key(ni, k); 4789 else 4790 error = wpi_del_key(ni, k); 4791 WPI_NT_UNLOCK(sc); 4792 4793 if (ni_ref) 4794 ieee80211_node_decref(ni); 4795 4796 return error; 4797 } 4798 4799 static int 4800 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4801 { 4802 return wpi_process_key(vap, k, 1); 4803 } 4804 4805 static int 4806 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4807 { 4808 return wpi_process_key(vap, k, 0); 4809 } 4810 4811 /* 4812 * This function is called after the runtime firmware notifies us of its 4813 * readiness (called in a process context). 4814 */ 4815 static int 4816 wpi_post_alive(struct wpi_softc *sc) 4817 { 4818 int ntries, error; 4819 4820 /* Check (again) that the radio is not disabled. */ 4821 if ((error = wpi_nic_lock(sc)) != 0) 4822 return error; 4823 4824 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4825 4826 /* NB: Runtime firmware must be up and running. */ 4827 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4828 device_printf(sc->sc_dev, 4829 "RF switch: radio disabled (%s)\n", __func__); 4830 wpi_nic_unlock(sc); 4831 return EPERM; /* :-) */ 4832 } 4833 wpi_nic_unlock(sc); 4834 4835 /* Wait for thermal sensor to calibrate. */ 4836 for (ntries = 0; ntries < 1000; ntries++) { 4837 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4838 break; 4839 DELAY(10); 4840 } 4841 4842 if (ntries == 1000) { 4843 device_printf(sc->sc_dev, 4844 "timeout waiting for thermal sensor calibration\n"); 4845 return ETIMEDOUT; 4846 } 4847 4848 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4849 return 0; 4850 } 4851 4852 /* 4853 * The firmware boot code is small and is intended to be copied directly into 4854 * the NIC internal memory (no DMA transfer). 4855 */ 4856 static int 4857 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size) 4858 { 4859 int error, ntries; 4860 4861 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4862 4863 size /= sizeof (uint32_t); 4864 4865 if ((error = wpi_nic_lock(sc)) != 0) 4866 return error; 4867 4868 /* Copy microcode image into NIC memory. */ 4869 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4870 (const uint32_t *)ucode, size); 4871 4872 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4873 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4874 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4875 4876 /* Start boot load now. */ 4877 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4878 4879 /* Wait for transfer to complete. */ 4880 for (ntries = 0; ntries < 1000; ntries++) { 4881 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4882 DPRINTF(sc, WPI_DEBUG_HW, 4883 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4884 WPI_FH_TX_STATUS_IDLE(6), 4885 status & WPI_FH_TX_STATUS_IDLE(6)); 4886 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4887 DPRINTF(sc, WPI_DEBUG_HW, 4888 "Status Match! 
- ntries = %d\n", ntries); 4889 break; 4890 } 4891 DELAY(10); 4892 } 4893 if (ntries == 1000) { 4894 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4895 __func__); 4896 wpi_nic_unlock(sc); 4897 return ETIMEDOUT; 4898 } 4899 4900 /* Enable boot after power up. */ 4901 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4902 4903 wpi_nic_unlock(sc); 4904 return 0; 4905 } 4906 4907 static int 4908 wpi_load_firmware(struct wpi_softc *sc) 4909 { 4910 struct wpi_fw_info *fw = &sc->fw; 4911 struct wpi_dma_info *dma = &sc->fw_dma; 4912 int error; 4913 4914 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4915 4916 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4917 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4918 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4919 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4920 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4921 4922 /* Tell adapter where to find initialization sections. */ 4923 if ((error = wpi_nic_lock(sc)) != 0) 4924 return error; 4925 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4926 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4927 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4928 dma->paddr + WPI_FW_DATA_MAXSZ); 4929 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4930 wpi_nic_unlock(sc); 4931 4932 /* Load firmware boot code. */ 4933 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4934 if (error != 0) { 4935 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4936 __func__); 4937 return error; 4938 } 4939 4940 /* Now press "execute". */ 4941 WPI_WRITE(sc, WPI_RESET, 0); 4942 4943 /* Wait at most one second for first alive notification. */ 4944 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4945 device_printf(sc->sc_dev, 4946 "%s: timeout waiting for adapter to initialize, error %d\n", 4947 __func__, error); 4948 return error; 4949 } 4950 4951 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4952 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4953 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4954 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4955 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4956 4957 /* Tell adapter where to find runtime sections. 
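Same BSM programming as for the init image above; or'ing WPI_FW_UPDATED into the text size appears to tell the bootstrap state machine that a new runtime image is ready to be loaded.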
*/ 4958 if ((error = wpi_nic_lock(sc)) != 0) 4959 return error; 4960 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4961 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4962 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4963 dma->paddr + WPI_FW_DATA_MAXSZ); 4964 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4965 WPI_FW_UPDATED | fw->main.textsz); 4966 wpi_nic_unlock(sc); 4967 4968 return 0; 4969 } 4970 4971 static int 4972 wpi_read_firmware(struct wpi_softc *sc) 4973 { 4974 const struct firmware *fp; 4975 struct wpi_fw_info *fw = &sc->fw; 4976 const struct wpi_firmware_hdr *hdr; 4977 int error; 4978 4979 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4980 4981 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4982 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 4983 4984 WPI_UNLOCK(sc); 4985 fp = firmware_get(WPI_FW_NAME); 4986 WPI_LOCK(sc); 4987 4988 if (fp == NULL) { 4989 device_printf(sc->sc_dev, 4990 "could not load firmware image '%s'\n", WPI_FW_NAME); 4991 return EINVAL; 4992 } 4993 4994 sc->fw_fp = fp; 4995 4996 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 4997 device_printf(sc->sc_dev, 4998 "firmware file too short: %zu bytes\n", fp->datasize); 4999 error = EINVAL; 5000 goto fail; 5001 } 5002 5003 fw->size = fp->datasize; 5004 fw->data = (const uint8_t *)fp->data; 5005 5006 /* Extract firmware header information. */ 5007 hdr = (const struct wpi_firmware_hdr *)fw->data; 5008 5009 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5010 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5011 5012 fw->main.textsz = le32toh(hdr->rtextsz); 5013 fw->main.datasz = le32toh(hdr->rdatasz); 5014 fw->init.textsz = le32toh(hdr->itextsz); 5015 fw->init.datasz = le32toh(hdr->idatasz); 5016 fw->boot.textsz = le32toh(hdr->btextsz); 5017 fw->boot.datasz = 0; 5018 5019 /* Sanity-check firmware header. */ 5020 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5021 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5022 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5023 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5024 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5025 (fw->boot.textsz & 3) != 0) { 5026 device_printf(sc->sc_dev, "invalid firmware header\n"); 5027 error = EINVAL; 5028 goto fail; 5029 } 5030 5031 /* Check that all firmware sections fit. */ 5032 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5033 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5034 device_printf(sc->sc_dev, 5035 "firmware file too short: %zu bytes\n", fw->size); 5036 error = EINVAL; 5037 goto fail; 5038 } 5039 5040 /* Get pointers to firmware sections. 
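They follow the header back to back, in the order shown in the layout diagram above: runtime text/data, init text/data, then boot text.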
*/ 5041 fw->main.text = (const uint8_t *)(hdr + 1); 5042 fw->main.data = fw->main.text + fw->main.textsz; 5043 fw->init.text = fw->main.data + fw->main.datasz; 5044 fw->init.data = fw->init.text + fw->init.textsz; 5045 fw->boot.text = fw->init.data + fw->init.datasz; 5046 5047 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5048 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5049 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5050 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5051 fw->main.textsz, fw->main.datasz, 5052 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5053 5054 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5055 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5056 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5057 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5058 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5059 5060 return 0; 5061 5062 fail: wpi_unload_firmware(sc); 5063 return error; 5064 } 5065 5066 /** 5067 * Free the referenced firmware image 5068 */ 5069 static void 5070 wpi_unload_firmware(struct wpi_softc *sc) 5071 { 5072 if (sc->fw_fp != NULL) { 5073 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5074 sc->fw_fp = NULL; 5075 } 5076 } 5077 5078 static int 5079 wpi_clock_wait(struct wpi_softc *sc) 5080 { 5081 int ntries; 5082 5083 /* Set "initialization complete" bit. */ 5084 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5085 5086 /* Wait for clock stabilization. */ 5087 for (ntries = 0; ntries < 2500; ntries++) { 5088 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5089 return 0; 5090 DELAY(100); 5091 } 5092 device_printf(sc->sc_dev, 5093 "%s: timeout waiting for clock stabilization\n", __func__); 5094 5095 return ETIMEDOUT; 5096 } 5097 5098 static int 5099 wpi_apm_init(struct wpi_softc *sc) 5100 { 5101 uint32_t reg; 5102 int error; 5103 5104 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5105 5106 /* Disable L0s exit timer (NMI bug workaround). */ 5107 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5108 /* Don't wait for ICH L0s (ICH bug workaround). */ 5109 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5110 5111 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5112 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5113 5114 /* Retrieve PCIe Active State Power Management (ASPM). */ 5115 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5116 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5117 if (reg & 0x02) /* L1 Entry enabled. */ 5118 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5119 else 5120 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5121 5122 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5123 5124 /* Wait for clock stabilization before accessing prph. */ 5125 if ((error = wpi_clock_wait(sc)) != 0) 5126 return error; 5127 5128 if ((error = wpi_nic_lock(sc)) != 0) 5129 return error; 5130 /* Cleanup. */ 5131 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5132 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5133 5134 /* Enable DMA and BSM (Bootstrap State Machine). */ 5135 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5136 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5137 DELAY(20); 5138 /* Disable L1-Active. 
*/ 5139 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5140 wpi_nic_unlock(sc); 5141 5142 return 0; 5143 } 5144 5145 static void 5146 wpi_apm_stop_master(struct wpi_softc *sc) 5147 { 5148 int ntries; 5149 5150 /* Stop busmaster DMA activity. */ 5151 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5152 5153 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5154 WPI_GP_CNTRL_MAC_PS) 5155 return; /* Already asleep. */ 5156 5157 for (ntries = 0; ntries < 100; ntries++) { 5158 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5159 return; 5160 DELAY(10); 5161 } 5162 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5163 __func__); 5164 } 5165 5166 static void 5167 wpi_apm_stop(struct wpi_softc *sc) 5168 { 5169 wpi_apm_stop_master(sc); 5170 5171 /* Reset the entire device. */ 5172 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5173 DELAY(10); 5174 /* Clear "initialization complete" bit. */ 5175 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5176 } 5177 5178 static void 5179 wpi_nic_config(struct wpi_softc *sc) 5180 { 5181 uint32_t rev; 5182 5183 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5184 5185 /* voodoo from the Linux "driver".. */ 5186 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5187 if ((rev & 0xc0) == 0x40) 5188 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5189 else if (!(rev & 0x80)) 5190 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5191 5192 if (sc->cap == 0x80) 5193 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5194 5195 if ((sc->rev & 0xf0) == 0xd0) 5196 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5197 else 5198 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5199 5200 if (sc->type > 1) 5201 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5202 } 5203 5204 static int 5205 wpi_hw_init(struct wpi_softc *sc) 5206 { 5207 int chnl, ntries, error; 5208 5209 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5210 5211 /* Clear pending interrupts. */ 5212 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5213 5214 if ((error = wpi_apm_init(sc)) != 0) { 5215 device_printf(sc->sc_dev, 5216 "%s: could not power ON adapter, error %d\n", __func__, 5217 error); 5218 return error; 5219 } 5220 5221 /* Select VMAIN power source. */ 5222 if ((error = wpi_nic_lock(sc)) != 0) 5223 return error; 5224 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5225 wpi_nic_unlock(sc); 5226 /* Spin until VMAIN gets selected. */ 5227 for (ntries = 0; ntries < 5000; ntries++) { 5228 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5229 break; 5230 DELAY(10); 5231 } 5232 if (ntries == 5000) { 5233 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5234 return ETIMEDOUT; 5235 } 5236 5237 /* Perform adapter initialization. */ 5238 wpi_nic_config(sc); 5239 5240 /* Initialize RX ring. */ 5241 if ((error = wpi_nic_lock(sc)) != 0) 5242 return error; 5243 /* Set physical address of RX ring. */ 5244 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5245 /* Set physical address of RX read pointer. */ 5246 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5247 offsetof(struct wpi_shared, next)); 5248 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5249 /* Enable RX. 
*/ 5250 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5251 WPI_FH_RX_CONFIG_DMA_ENA | 5252 WPI_FH_RX_CONFIG_RDRBD_ENA | 5253 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5254 WPI_FH_RX_CONFIG_MAXFRAG | 5255 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5256 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5257 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5258 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5259 wpi_nic_unlock(sc); 5260 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5261 5262 /* Initialize TX rings. */ 5263 if ((error = wpi_nic_lock(sc)) != 0) 5264 return error; 5265 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5266 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5267 /* Enable all 6 TX rings. */ 5268 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5269 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5270 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5271 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5272 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5273 /* Set physical address of TX rings. */ 5274 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5275 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5276 5277 /* Enable all DMA channels. */ 5278 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5279 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5280 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5281 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5282 } 5283 wpi_nic_unlock(sc); 5284 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5285 5286 /* Clear "radio off" and "commands blocked" bits. */ 5287 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5288 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5289 5290 /* Clear pending interrupts. */ 5291 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5292 /* Enable interrupts. */ 5293 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5294 5295 /* _Really_ make sure "radio off" bit is cleared! */ 5296 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5297 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5298 5299 if ((error = wpi_load_firmware(sc)) != 0) { 5300 device_printf(sc->sc_dev, 5301 "%s: could not load firmware, error %d\n", __func__, 5302 error); 5303 return error; 5304 } 5305 /* Wait at most one second for firmware alive notification. */ 5306 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5307 device_printf(sc->sc_dev, 5308 "%s: timeout waiting for adapter to initialize, error %d\n", 5309 __func__, error); 5310 return error; 5311 } 5312 5313 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5314 5315 /* Do post-firmware initialization. */ 5316 return wpi_post_alive(sc); 5317 } 5318 5319 static void 5320 wpi_hw_stop(struct wpi_softc *sc) 5321 { 5322 int chnl, qid, ntries; 5323 5324 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5325 5326 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5327 wpi_nic_lock(sc); 5328 5329 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5330 5331 /* Disable interrupts. */ 5332 WPI_WRITE(sc, WPI_INT_MASK, 0); 5333 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5334 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5335 5336 /* Make sure we no longer hold the NIC lock. */ 5337 wpi_nic_unlock(sc); 5338 5339 if (wpi_nic_lock(sc) == 0) { 5340 /* Stop TX scheduler. */ 5341 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5342 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5343 5344 /* Stop all DMA channels. 
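Each channel gets up to 2 ms (200 polls of 10 us) to report idle before we give up and move on.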
*/ 5345 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5346 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5347 for (ntries = 0; ntries < 200; ntries++) { 5348 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5349 WPI_FH_TX_STATUS_IDLE(chnl)) 5350 break; 5351 DELAY(10); 5352 } 5353 } 5354 wpi_nic_unlock(sc); 5355 } 5356 5357 /* Stop RX ring. */ 5358 wpi_reset_rx_ring(sc); 5359 5360 /* Reset all TX rings. */ 5361 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 5362 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5363 5364 if (wpi_nic_lock(sc) == 0) { 5365 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5366 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5367 wpi_nic_unlock(sc); 5368 } 5369 DELAY(5); 5370 /* Power OFF adapter. */ 5371 wpi_apm_stop(sc); 5372 } 5373 5374 static void 5375 wpi_radio_on(void *arg0, int pending) 5376 { 5377 struct wpi_softc *sc = arg0; 5378 struct ieee80211com *ic = &sc->sc_ic; 5379 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5380 5381 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5382 5383 WPI_LOCK(sc); 5384 callout_stop(&sc->watchdog_rfkill); 5385 WPI_UNLOCK(sc); 5386 5387 if (vap != NULL) 5388 ieee80211_init(vap); 5389 } 5390 5391 static void 5392 wpi_radio_off(void *arg0, int pending) 5393 { 5394 struct wpi_softc *sc = arg0; 5395 struct ieee80211com *ic = &sc->sc_ic; 5396 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5397 5398 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5399 5400 ieee80211_notify_radio(ic, 0); 5401 wpi_stop(sc); 5402 if (vap != NULL) 5403 ieee80211_stop(vap); 5404 5405 WPI_LOCK(sc); 5406 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5407 WPI_UNLOCK(sc); 5408 } 5409 5410 static int 5411 wpi_init(struct wpi_softc *sc) 5412 { 5413 int error = 0; 5414 5415 WPI_LOCK(sc); 5416 5417 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5418 5419 if (sc->sc_running != 0) 5420 goto end; 5421 5422 /* Check that the radio is not disabled by hardware switch. */ 5423 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5424 device_printf(sc->sc_dev, 5425 "RF switch: radio disabled (%s)\n", __func__); 5426 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5427 sc); 5428 error = EINPROGRESS; 5429 goto end; 5430 } 5431 5432 /* Read firmware images from the filesystem. */ 5433 if ((error = wpi_read_firmware(sc)) != 0) { 5434 device_printf(sc->sc_dev, 5435 "%s: could not read firmware, error %d\n", __func__, 5436 error); 5437 goto end; 5438 } 5439 5440 sc->sc_running = 1; 5441 5442 /* Initialize hardware and upload firmware. */ 5443 error = wpi_hw_init(sc); 5444 wpi_unload_firmware(sc); 5445 if (error != 0) { 5446 device_printf(sc->sc_dev, 5447 "%s: could not initialize hardware, error %d\n", __func__, 5448 error); 5449 goto fail; 5450 } 5451 5452 /* Configure adapter now that it is ready. 
*/
5453 if ((error = wpi_config(sc)) != 0) {
5454 device_printf(sc->sc_dev,
5455 "%s: could not configure device, error %d\n", __func__,
5456 error);
5457 goto fail;
5458 }
5459
5460 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5461
5462 WPI_UNLOCK(sc);
5463
5464 return 0;
5465
5466 fail: wpi_stop_locked(sc);
5467
5468 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
5469 WPI_UNLOCK(sc);
5470
5471 return error;
5472 }
5473
5474 static void
5475 wpi_stop_locked(struct wpi_softc *sc)
5476 {
5477
5478 WPI_LOCK_ASSERT(sc);
5479
5480 if (sc->sc_running == 0)
5481 return;
5482
5483 WPI_TX_LOCK(sc);
5484 WPI_TXQ_LOCK(sc);
5485 sc->sc_running = 0;
5486 WPI_TXQ_UNLOCK(sc);
5487 WPI_TX_UNLOCK(sc);
5488
5489 WPI_TXQ_STATE_LOCK(sc);
5490 callout_stop(&sc->tx_timeout);
5491 WPI_TXQ_STATE_UNLOCK(sc);
5492
5493 WPI_RXON_LOCK(sc);
5494 callout_stop(&sc->scan_timeout);
5495 callout_stop(&sc->calib_to);
5496 WPI_RXON_UNLOCK(sc);
5497
5498 /* Power OFF hardware. */
5499 wpi_hw_stop(sc);
5500 }
5501
5502 static void
5503 wpi_stop(struct wpi_softc *sc)
5504 {
5505 WPI_LOCK(sc);
5506 wpi_stop_locked(sc);
5507 WPI_UNLOCK(sc);
5508 }
5509
5510 /*
5511 * Callback from net80211 to start a scan.
5512 */
5513 static void
5514 wpi_scan_start(struct ieee80211com *ic)
5515 {
5516 struct wpi_softc *sc = ic->ic_softc;
5517
5518 wpi_set_led(sc, WPI_LED_LINK, 20, 2);
5519 }
5520
5521 /*
5522 * Callback from net80211 to terminate a scan.
5523 */
5524 static void
5525 wpi_scan_end(struct ieee80211com *ic)
5526 {
5527 struct wpi_softc *sc = ic->ic_softc;
5528 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5529
5530 if (vap->iv_state == IEEE80211_S_RUN)
5531 wpi_set_led(sc, WPI_LED_LINK, 0, 1);
5532 }
5533
5534 /**
5535 * Called by the net80211 framework to indicate to the driver
5536 * that the channel should be changed.
5537 */
5538 static void
5539 wpi_set_channel(struct ieee80211com *ic)
5540 {
5541 const struct ieee80211_channel *c = ic->ic_curchan;
5542 struct wpi_softc *sc = ic->ic_softc;
5543 int error;
5544
5545 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5546
5547 WPI_LOCK(sc);
5548 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
5549 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
5550 WPI_UNLOCK(sc);
5551 WPI_TX_LOCK(sc);
5552 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
5553 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
5554 WPI_TX_UNLOCK(sc);
5555
5556 /*
5557 * Only need to set the channel in Monitor mode. AP scanning and auth
5558 * are already taken care of by their respective firmware commands.
5559 */
5560 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5561 WPI_RXON_LOCK(sc);
5562 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
5563 if (IEEE80211_IS_CHAN_2GHZ(c)) {
5564 sc->rxon.flags |= htole32(WPI_RXON_AUTO |
5565 WPI_RXON_24GHZ);
5566 } else {
5567 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
5568 WPI_RXON_24GHZ);
5569 }
5570 if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
5571 device_printf(sc->sc_dev,
5572 "%s: error %d setting channel\n", __func__,
5573 error);
5574 WPI_RXON_UNLOCK(sc);
5575 }
5576 }
5577
5578 /**
5579 * Called by net80211 to indicate that we need to scan the current
5580 * channel. The channel has previously been set via the wpi_set_channel
5581 * callback.
5582 */
5583 static void
5584 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5585 {
5586 struct ieee80211vap *vap = ss->ss_vap;
5587 struct ieee80211com *ic = vap->iv_ic;
5588 struct wpi_softc *sc = ic->ic_softc;
5589 int error;
5590
5591 WPI_RXON_LOCK(sc);
5592 error = wpi_scan(sc, ic->ic_curchan);
5593 WPI_RXON_UNLOCK(sc);
5594 if (error != 0)
5595 ieee80211_cancel_scan(vap);
5596 }
5597
5598 /**
5599 * Called by the net80211 framework to indicate that
5600 * the minimum dwell time has been met and the scan should terminate.
5601 * We don't actually terminate the scan, as the firmware will notify
5602 * us when it's finished and we have no way to interrupt it.
5603 */
5604 static void
5605 wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5606 {
5607 /* NB: don't try to abort scan; wait for firmware to finish */
5608 }
5609
5610 static void
5611 wpi_hw_reset(void *arg, int pending)
5612 {
5613 struct wpi_softc *sc = arg;
5614 struct ieee80211com *ic = &sc->sc_ic;
5615 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5616
5617 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5618
5619 ieee80211_notify_radio(ic, 0);
5620 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN))
5621 ieee80211_cancel_scan(vap);
5622
5623 wpi_stop(sc);
5624 if (vap != NULL) {
5625 ieee80211_stop(vap);
5626 ieee80211_init(vap);
5627 }
5628 }
5629