/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware as many
 * other adapters do. Instead, at run time the EEPROM is set into a known
 * state and told to load the boot firmware. The boot firmware loads an
 * init and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and the hardware communicate
 * with the firmware through circular DMA rings in that SRAM.
 *
 * There are six memory rings: one command ring, one RX data ring and
 * four TX data rings. The four TX data rings allow for QoS
 * prioritization.
 *
 * The RX data ring consists of 32 DMA buffers. Two registers are used to
 * indicate how far along the ring the driver and the firmware are: the
 * driver sets the initial read index (reg1) and the initial write index
 * (reg2); the firmware updates the read index (reg1) when it receives a
 * packet and fires an interrupt. The driver then processes the buffers
 * starting at reg1, tells the firmware which buffers have been consumed
 * by updating reg2, and allocates new memory for each processed buffer.
 *
 * The TX rings work in a similar way, except that the firmware stops
 * processing buffers once a queue is full and resumes only after
 * confirmation of a successful transmission (tx_done) has been received.
 *
 * The command ring operates in the same manner as the TX queues.
 *
 * All communication directly with the card (i.e. the EEPROM) is classed
 * as Stage 1 communication.
 *
 * All communication with the card via the firmware is classed as Stage 2
 * communication. The firmware consists of two parts: a bootstrap firmware
 * and a runtime firmware. Both are loaded from host memory into the card
 * via DMA and then told to execute. From this point on the majority of
 * communication between the driver and the card goes via the firmware.
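 *
 * As an illustration only (not the driver's actual code; see wpi_rx_done()
 * and wpi_update_rx_ring() below for the real implementation), the RX
 * index handshake described above amounts to the following, where
 * hw_read_index, process_frame() and refill_slot() stand in for the
 * firmware-supplied index and the real per-buffer work:
 *
 *	while (idx != hw_read_index) {
 *		process_frame(&ring->data[idx]);	-- hand frame to net80211
 *		refill_slot(ring, idx);			-- new mbuf + DMA address
 *		idx = (idx + 1) % WPI_RX_RING_COUNT;
 *	}
 *	WPI_WRITE(sc, WPI_FH_RX_WPTR, idx & ~7);	-- publish reg2 to firmware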
58 */ 59 60 #include "opt_wlan.h" 61 #include "opt_wpi.h" 62 63 #include <sys/param.h> 64 #include <sys/sysctl.h> 65 #include <sys/sockio.h> 66 #include <sys/mbuf.h> 67 #include <sys/kernel.h> 68 #include <sys/socket.h> 69 #include <sys/systm.h> 70 #include <sys/malloc.h> 71 #include <sys/queue.h> 72 #include <sys/taskqueue.h> 73 #include <sys/module.h> 74 #include <sys/bus.h> 75 #include <sys/endian.h> 76 #include <sys/linker.h> 77 #include <sys/firmware.h> 78 79 #include <machine/bus.h> 80 #include <machine/resource.h> 81 #include <sys/rman.h> 82 83 #include <dev/pci/pcireg.h> 84 #include <dev/pci/pcivar.h> 85 86 #include <net/bpf.h> 87 #include <net/if.h> 88 #include <net/if_var.h> 89 #include <net/if_arp.h> 90 #include <net/ethernet.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/if_types.h> 94 95 #include <netinet/in.h> 96 #include <netinet/in_systm.h> 97 #include <netinet/in_var.h> 98 #include <netinet/if_ether.h> 99 #include <netinet/ip.h> 100 101 #include <net80211/ieee80211_var.h> 102 #include <net80211/ieee80211_radiotap.h> 103 #include <net80211/ieee80211_regdomain.h> 104 #include <net80211/ieee80211_ratectl.h> 105 106 #include <dev/wpi/if_wpireg.h> 107 #include <dev/wpi/if_wpivar.h> 108 #include <dev/wpi/if_wpi_debug.h> 109 110 struct wpi_ident { 111 uint16_t vendor; 112 uint16_t device; 113 uint16_t subdevice; 114 const char *name; 115 }; 116 117 static const struct wpi_ident wpi_ident_table[] = { 118 /* The below entries support ABG regardless of the subid */ 119 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 120 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 /* The below entries only support BG */ 122 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 123 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0, 0, 0, NULL } 127 }; 128 129 static int wpi_probe(device_t); 130 static int wpi_attach(device_t); 131 static void wpi_radiotap_attach(struct wpi_softc *); 132 static void wpi_sysctlattach(struct wpi_softc *); 133 static void wpi_init_beacon(struct wpi_vap *); 134 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 135 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 136 const uint8_t [IEEE80211_ADDR_LEN], 137 const uint8_t [IEEE80211_ADDR_LEN]); 138 static void wpi_vap_delete(struct ieee80211vap *); 139 static int wpi_detach(device_t); 140 static int wpi_shutdown(device_t); 141 static int wpi_suspend(device_t); 142 static int wpi_resume(device_t); 143 static int wpi_nic_lock(struct wpi_softc *); 144 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 145 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 146 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 147 void **, bus_size_t, bus_size_t); 148 static void wpi_dma_contig_free(struct wpi_dma_info *); 149 static int wpi_alloc_shared(struct wpi_softc *); 150 static void wpi_free_shared(struct wpi_softc *); 151 static int wpi_alloc_fwmem(struct wpi_softc *); 152 static void wpi_free_fwmem(struct wpi_softc *); 153 static int wpi_alloc_rx_ring(struct wpi_softc *); 154 static void wpi_update_rx_ring(struct wpi_softc *); 155 static void wpi_update_rx_ring_ps(struct wpi_softc *); 156 static void wpi_reset_rx_ring(struct wpi_softc *); 157 static void wpi_free_rx_ring(struct wpi_softc *); 158 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 159 uint8_t); 160 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_update_tx_ring_ps(struct wpi_softc *, 162 struct wpi_tx_ring *); 163 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 164 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static int wpi_read_eeprom(struct wpi_softc *, 166 uint8_t macaddr[IEEE80211_ADDR_LEN]); 167 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 168 static void wpi_read_eeprom_band(struct wpi_softc *, uint8_t); 169 static int wpi_read_eeprom_channels(struct wpi_softc *, uint8_t); 170 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 171 struct ieee80211_channel *); 172 static int wpi_setregdomain(struct ieee80211com *, 173 struct ieee80211_regdomain *, int, 174 struct ieee80211_channel[]); 175 static int wpi_read_eeprom_group(struct wpi_softc *, uint8_t); 176 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 177 const uint8_t mac[IEEE80211_ADDR_LEN]); 178 static void wpi_node_free(struct ieee80211_node *); 179 static void wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 180 const struct ieee80211_rx_stats *, 181 int, int); 182 static void wpi_restore_node(void *, struct ieee80211_node *); 183 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 184 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 185 static void wpi_calib_timeout(void *); 186 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 187 struct wpi_rx_data *); 188 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 189 struct wpi_rx_data *); 190 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 191 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 192 static void wpi_notif_intr(struct wpi_softc *); 193 static void wpi_wakeup_intr(struct wpi_softc *); 194 #ifdef WPI_DEBUG 195 static void wpi_debug_registers(struct wpi_softc *); 196 #endif 197 static void wpi_fatal_intr(struct wpi_softc *); 198 static void wpi_intr(void *); 199 static void wpi_free_txfrags(struct wpi_softc *, uint16_t); 200 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 201 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 202 struct ieee80211_node *); 203 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 204 struct ieee80211_node *, 205 const struct ieee80211_bpf_params *); 206 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 207 const struct ieee80211_bpf_params *); 208 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 209 static void wpi_watchdog_rfkill(void *); 210 static void wpi_scan_timeout(void *); 211 static void wpi_tx_timeout(void *); 212 static void wpi_parent(struct ieee80211com *); 213 static int wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t, 214 int); 215 static int wpi_mrr_setup(struct wpi_softc *); 216 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 217 static int wpi_add_broadcast_node(struct wpi_softc *, int); 218 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 219 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 220 static int wpi_updateedca(struct ieee80211com *); 221 static void wpi_set_promisc(struct wpi_softc *); 222 static void wpi_update_promisc(struct ieee80211com *); 223 static void wpi_update_mcast(struct ieee80211com *); 224 static void wpi_set_led(struct 
wpi_softc *, uint8_t, uint8_t, uint8_t); 225 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 226 static void wpi_power_calibration(struct wpi_softc *); 227 static int wpi_set_txpower(struct wpi_softc *, int); 228 static int wpi_get_power_index(struct wpi_softc *, 229 struct wpi_power_group *, uint8_t, int, int); 230 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 231 static int wpi_send_btcoex(struct wpi_softc *); 232 static int wpi_send_rxon(struct wpi_softc *, int, int); 233 static int wpi_config(struct wpi_softc *); 234 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 235 struct ieee80211_channel *, uint8_t); 236 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 237 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 238 struct ieee80211_channel *); 239 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 240 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 241 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 242 static int wpi_config_beacon(struct wpi_vap *); 243 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 244 static void wpi_update_beacon(struct ieee80211vap *, int); 245 static void wpi_newassoc(struct ieee80211_node *, int); 246 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 247 static int wpi_load_key(struct ieee80211_node *, 248 const struct ieee80211_key *); 249 static void wpi_load_key_cb(void *, struct ieee80211_node *); 250 static int wpi_set_global_keys(struct ieee80211_node *); 251 static int wpi_del_key(struct ieee80211_node *, 252 const struct ieee80211_key *); 253 static void wpi_del_key_cb(void *, struct ieee80211_node *); 254 static int wpi_process_key(struct ieee80211vap *, 255 const struct ieee80211_key *, int); 256 static int wpi_key_set(struct ieee80211vap *, 257 const struct ieee80211_key *); 258 static int wpi_key_delete(struct ieee80211vap *, 259 const struct ieee80211_key *); 260 static int wpi_post_alive(struct wpi_softc *); 261 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, 262 uint32_t); 263 static int wpi_load_firmware(struct wpi_softc *); 264 static int wpi_read_firmware(struct wpi_softc *); 265 static void wpi_unload_firmware(struct wpi_softc *); 266 static int wpi_clock_wait(struct wpi_softc *); 267 static int wpi_apm_init(struct wpi_softc *); 268 static void wpi_apm_stop_master(struct wpi_softc *); 269 static void wpi_apm_stop(struct wpi_softc *); 270 static void wpi_nic_config(struct wpi_softc *); 271 static int wpi_hw_init(struct wpi_softc *); 272 static void wpi_hw_stop(struct wpi_softc *); 273 static void wpi_radio_on(void *, int); 274 static void wpi_radio_off(void *, int); 275 static int wpi_init(struct wpi_softc *); 276 static void wpi_stop_locked(struct wpi_softc *); 277 static void wpi_stop(struct wpi_softc *); 278 static void wpi_scan_start(struct ieee80211com *); 279 static void wpi_scan_end(struct ieee80211com *); 280 static void wpi_set_channel(struct ieee80211com *); 281 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 282 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 283 static void wpi_hw_reset(void *, int); 284 285 static device_method_t wpi_methods[] = { 286 /* Device interface */ 287 DEVMETHOD(device_probe, wpi_probe), 288 DEVMETHOD(device_attach, wpi_attach), 289 DEVMETHOD(device_detach, wpi_detach), 290 DEVMETHOD(device_shutdown, wpi_shutdown), 291 DEVMETHOD(device_suspend, wpi_suspend), 292 
DEVMETHOD(device_resume, wpi_resume), 293 294 DEVMETHOD_END 295 }; 296 297 static driver_t wpi_driver = { 298 "wpi", 299 wpi_methods, 300 sizeof (struct wpi_softc) 301 }; 302 static devclass_t wpi_devclass; 303 304 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 305 306 MODULE_VERSION(wpi, 1); 307 308 MODULE_DEPEND(wpi, pci, 1, 1, 1); 309 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 310 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 311 312 static int 313 wpi_probe(device_t dev) 314 { 315 const struct wpi_ident *ident; 316 317 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 318 if (pci_get_vendor(dev) == ident->vendor && 319 pci_get_device(dev) == ident->device) { 320 device_set_desc(dev, ident->name); 321 return (BUS_PROBE_DEFAULT); 322 } 323 } 324 return ENXIO; 325 } 326 327 static int 328 wpi_attach(device_t dev) 329 { 330 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 331 struct ieee80211com *ic; 332 uint8_t i; 333 int error, rid; 334 #ifdef WPI_DEBUG 335 int supportsa = 1; 336 const struct wpi_ident *ident; 337 #endif 338 339 sc->sc_dev = dev; 340 341 #ifdef WPI_DEBUG 342 error = resource_int_value(device_get_name(sc->sc_dev), 343 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 344 if (error != 0) 345 sc->sc_debug = 0; 346 #else 347 sc->sc_debug = 0; 348 #endif 349 350 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 351 352 /* 353 * Get the offset of the PCI Express Capability Structure in PCI 354 * Configuration Space. 355 */ 356 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 357 if (error != 0) { 358 device_printf(dev, "PCIe capability structure not found!\n"); 359 return error; 360 } 361 362 /* 363 * Some card's only support 802.11b/g not a, check to see if 364 * this is one such card. A 0x0 in the subdevice table indicates 365 * the entire subdevice range is to be ignored. 366 */ 367 #ifdef WPI_DEBUG 368 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 369 if (ident->subdevice && 370 pci_get_subdevice(dev) == ident->subdevice) { 371 supportsa = 0; 372 break; 373 } 374 } 375 #endif 376 377 /* Clear device-specific "PCI retry timeout" register (41h). */ 378 pci_write_config(dev, 0x41, 0, 1); 379 380 /* Enable bus-mastering. */ 381 pci_enable_busmaster(dev); 382 383 rid = PCIR_BAR(0); 384 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 385 RF_ACTIVE); 386 if (sc->mem == NULL) { 387 device_printf(dev, "can't map mem space\n"); 388 return ENOMEM; 389 } 390 sc->sc_st = rman_get_bustag(sc->mem); 391 sc->sc_sh = rman_get_bushandle(sc->mem); 392 393 rid = 1; 394 if (pci_alloc_msi(dev, &rid) == 0) 395 rid = 1; 396 else 397 rid = 0; 398 /* Install interrupt handler. */ 399 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 400 (rid != 0 ? 0 : RF_SHAREABLE)); 401 if (sc->irq == NULL) { 402 device_printf(dev, "can't map interrupt\n"); 403 error = ENOMEM; 404 goto fail; 405 } 406 407 WPI_LOCK_INIT(sc); 408 WPI_TX_LOCK_INIT(sc); 409 WPI_RXON_LOCK_INIT(sc); 410 WPI_NT_LOCK_INIT(sc); 411 WPI_TXQ_LOCK_INIT(sc); 412 WPI_TXQ_STATE_LOCK_INIT(sc); 413 414 /* Allocate DMA memory for firmware transfers. */ 415 if ((error = wpi_alloc_fwmem(sc)) != 0) { 416 device_printf(dev, 417 "could not allocate memory for firmware, error %d\n", 418 error); 419 goto fail; 420 } 421 422 /* Allocate shared page. */ 423 if ((error = wpi_alloc_shared(sc)) != 0) { 424 device_printf(dev, "could not allocate shared page\n"); 425 goto fail; 426 } 427 428 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. 
*/ 429 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 430 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 431 device_printf(dev, 432 "could not allocate TX ring %d, error %d\n", i, 433 error); 434 goto fail; 435 } 436 } 437 438 /* Allocate RX ring. */ 439 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 440 device_printf(dev, "could not allocate RX ring, error %d\n", 441 error); 442 goto fail; 443 } 444 445 /* Clear pending interrupts. */ 446 WPI_WRITE(sc, WPI_INT, 0xffffffff); 447 448 ic = &sc->sc_ic; 449 ic->ic_softc = sc; 450 ic->ic_name = device_get_nameunit(dev); 451 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 452 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 453 454 /* Set device capabilities. */ 455 ic->ic_caps = 456 IEEE80211_C_STA /* station mode supported */ 457 | IEEE80211_C_IBSS /* IBSS mode supported */ 458 | IEEE80211_C_HOSTAP /* Host access point mode */ 459 | IEEE80211_C_MONITOR /* monitor mode supported */ 460 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 461 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 462 | IEEE80211_C_TXFRAG /* handle tx frags */ 463 | IEEE80211_C_TXPMGT /* tx power management */ 464 | IEEE80211_C_SHSLOT /* short slot time supported */ 465 | IEEE80211_C_WPA /* 802.11i */ 466 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 467 | IEEE80211_C_WME /* 802.11e */ 468 | IEEE80211_C_PMGT /* Station-side power mgmt */ 469 ; 470 471 ic->ic_cryptocaps = 472 IEEE80211_CRYPTO_AES_CCM; 473 474 /* 475 * Read in the eeprom and also setup the channels for 476 * net80211. We don't set the rates as net80211 does this for us 477 */ 478 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 479 device_printf(dev, "could not read EEPROM, error %d\n", 480 error); 481 goto fail; 482 } 483 484 #ifdef WPI_DEBUG 485 if (bootverbose) { 486 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 487 sc->domain); 488 device_printf(sc->sc_dev, "Hardware Type: %c\n", 489 sc->type > 1 ? 'B': '?'); 490 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 491 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 492 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 493 supportsa ? "does" : "does not"); 494 495 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 496 check what sc->rev really represents - benjsc 20070615 */ 497 } 498 #endif 499 500 ieee80211_ifattach(ic); 501 ic->ic_vap_create = wpi_vap_create; 502 ic->ic_vap_delete = wpi_vap_delete; 503 ic->ic_parent = wpi_parent; 504 ic->ic_raw_xmit = wpi_raw_xmit; 505 ic->ic_transmit = wpi_transmit; 506 ic->ic_node_alloc = wpi_node_alloc; 507 sc->sc_node_free = ic->ic_node_free; 508 ic->ic_node_free = wpi_node_free; 509 ic->ic_wme.wme_update = wpi_updateedca; 510 ic->ic_update_promisc = wpi_update_promisc; 511 ic->ic_update_mcast = wpi_update_mcast; 512 ic->ic_newassoc = wpi_newassoc; 513 ic->ic_scan_start = wpi_scan_start; 514 ic->ic_scan_end = wpi_scan_end; 515 ic->ic_set_channel = wpi_set_channel; 516 ic->ic_scan_curchan = wpi_scan_curchan; 517 ic->ic_scan_mindwell = wpi_scan_mindwell; 518 ic->ic_setregdomain = wpi_setregdomain; 519 520 sc->sc_update_rx_ring = wpi_update_rx_ring; 521 sc->sc_update_tx_ring = wpi_update_tx_ring; 522 523 wpi_radiotap_attach(sc); 524 525 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 526 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 527 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 528 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 529 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 530 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 531 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 532 533 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 534 taskqueue_thread_enqueue, &sc->sc_tq); 535 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 536 if (error != 0) { 537 device_printf(dev, "can't start threads, error %d\n", error); 538 goto fail; 539 } 540 541 wpi_sysctlattach(sc); 542 543 /* 544 * Hook our interrupt after all initialization is complete. 545 */ 546 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 547 NULL, wpi_intr, sc, &sc->sc_ih); 548 if (error != 0) { 549 device_printf(dev, "can't establish interrupt, error %d\n", 550 error); 551 goto fail; 552 } 553 554 if (bootverbose) 555 ieee80211_announce(ic); 556 557 #ifdef WPI_DEBUG 558 if (sc->sc_debug & WPI_DEBUG_HW) 559 ieee80211_announce_channels(ic); 560 #endif 561 562 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 563 return 0; 564 565 fail: wpi_detach(dev); 566 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 567 return error; 568 } 569 570 /* 571 * Attach the interface to 802.11 radiotap. 
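 *
 * The WPI_TX_RADIOTAP_PRESENT and WPI_RX_RADIOTAP_PRESENT bitmaps declare
 * which radiotap fields this driver reports; the per-frame values are
 * filled into sc_txtap/sc_rxtap (e.g. wr_rate and wr_dbm_antsignal in
 * wpi_rx_done()) before net80211 delivers the radiotap header to
 * listeners.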
572 */ 573 static void 574 wpi_radiotap_attach(struct wpi_softc *sc) 575 { 576 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 577 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 578 579 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 580 ieee80211_radiotap_attach(&sc->sc_ic, 581 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 582 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 583 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 584 } 585 586 static void 587 wpi_sysctlattach(struct wpi_softc *sc) 588 { 589 #ifdef WPI_DEBUG 590 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 591 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 592 593 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 594 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 595 "control debugging printfs"); 596 #endif 597 } 598 599 static void 600 wpi_init_beacon(struct wpi_vap *wvp) 601 { 602 struct wpi_buf *bcn = &wvp->wv_bcbuf; 603 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 604 605 cmd->id = WPI_ID_BROADCAST; 606 cmd->ofdm_mask = 0xff; 607 cmd->cck_mask = 0x0f; 608 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 609 610 /* 611 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 612 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 613 */ 614 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 615 616 bcn->code = WPI_CMD_SET_BEACON; 617 bcn->ac = WPI_CMD_QUEUE_NUM; 618 bcn->size = sizeof(struct wpi_cmd_beacon); 619 } 620 621 static struct ieee80211vap * 622 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 623 enum ieee80211_opmode opmode, int flags, 624 const uint8_t bssid[IEEE80211_ADDR_LEN], 625 const uint8_t mac[IEEE80211_ADDR_LEN]) 626 { 627 struct wpi_vap *wvp; 628 struct ieee80211vap *vap; 629 630 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 631 return NULL; 632 633 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 634 vap = &wvp->wv_vap; 635 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 636 637 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 638 WPI_VAP_LOCK_INIT(wvp); 639 wpi_init_beacon(wvp); 640 } 641 642 /* Override with driver methods. */ 643 vap->iv_key_set = wpi_key_set; 644 vap->iv_key_delete = wpi_key_delete; 645 if (opmode == IEEE80211_M_IBSS) { 646 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 647 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; 648 } 649 wvp->wv_newstate = vap->iv_newstate; 650 vap->iv_newstate = wpi_newstate; 651 vap->iv_update_beacon = wpi_update_beacon; 652 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 653 654 ieee80211_ratectl_init(vap); 655 /* Complete setup. 
*/ 656 ieee80211_vap_attach(vap, ieee80211_media_change, 657 ieee80211_media_status, mac); 658 ic->ic_opmode = opmode; 659 return vap; 660 } 661 662 static void 663 wpi_vap_delete(struct ieee80211vap *vap) 664 { 665 struct wpi_vap *wvp = WPI_VAP(vap); 666 struct wpi_buf *bcn = &wvp->wv_bcbuf; 667 enum ieee80211_opmode opmode = vap->iv_opmode; 668 669 ieee80211_ratectl_deinit(vap); 670 ieee80211_vap_detach(vap); 671 672 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 673 if (bcn->m != NULL) 674 m_freem(bcn->m); 675 676 WPI_VAP_LOCK_DESTROY(wvp); 677 } 678 679 free(wvp, M_80211_VAP); 680 } 681 682 static int 683 wpi_detach(device_t dev) 684 { 685 struct wpi_softc *sc = device_get_softc(dev); 686 struct ieee80211com *ic = &sc->sc_ic; 687 uint8_t qid; 688 689 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 690 691 if (ic->ic_vap_create == wpi_vap_create) { 692 ieee80211_draintask(ic, &sc->sc_radioon_task); 693 694 wpi_stop(sc); 695 696 if (sc->sc_tq != NULL) { 697 taskqueue_drain_all(sc->sc_tq); 698 taskqueue_free(sc->sc_tq); 699 } 700 701 callout_drain(&sc->watchdog_rfkill); 702 callout_drain(&sc->tx_timeout); 703 callout_drain(&sc->scan_timeout); 704 callout_drain(&sc->calib_to); 705 ieee80211_ifdetach(ic); 706 } 707 708 /* Uninstall interrupt handler. */ 709 if (sc->irq != NULL) { 710 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 711 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 712 sc->irq); 713 pci_release_msi(dev); 714 } 715 716 if (sc->txq[0].data_dmat) { 717 /* Free DMA resources. */ 718 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 719 wpi_free_tx_ring(sc, &sc->txq[qid]); 720 721 wpi_free_rx_ring(sc); 722 wpi_free_shared(sc); 723 } 724 725 if (sc->fw_dma.tag) 726 wpi_free_fwmem(sc); 727 728 if (sc->mem != NULL) 729 bus_release_resource(dev, SYS_RES_MEMORY, 730 rman_get_rid(sc->mem), sc->mem); 731 732 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 733 WPI_TXQ_STATE_LOCK_DESTROY(sc); 734 WPI_TXQ_LOCK_DESTROY(sc); 735 WPI_NT_LOCK_DESTROY(sc); 736 WPI_RXON_LOCK_DESTROY(sc); 737 WPI_TX_LOCK_DESTROY(sc); 738 WPI_LOCK_DESTROY(sc); 739 return 0; 740 } 741 742 static int 743 wpi_shutdown(device_t dev) 744 { 745 struct wpi_softc *sc = device_get_softc(dev); 746 747 wpi_stop(sc); 748 return 0; 749 } 750 751 static int 752 wpi_suspend(device_t dev) 753 { 754 struct wpi_softc *sc = device_get_softc(dev); 755 struct ieee80211com *ic = &sc->sc_ic; 756 757 ieee80211_suspend_all(ic); 758 return 0; 759 } 760 761 static int 762 wpi_resume(device_t dev) 763 { 764 struct wpi_softc *sc = device_get_softc(dev); 765 struct ieee80211com *ic = &sc->sc_ic; 766 767 /* Clear device-specific "PCI retry timeout" register (41h). */ 768 pci_write_config(dev, 0x41, 0, 1); 769 770 ieee80211_resume_all(ic); 771 return 0; 772 } 773 774 /* 775 * Grab exclusive access to NIC memory. 776 */ 777 static int 778 wpi_nic_lock(struct wpi_softc *sc) 779 { 780 int ntries; 781 782 /* Request exclusive access to NIC. */ 783 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 784 785 /* Spin until we actually get the lock. */ 786 for (ntries = 0; ntries < 1000; ntries++) { 787 if ((WPI_READ(sc, WPI_GP_CNTRL) & 788 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 789 WPI_GP_CNTRL_MAC_ACCESS_ENA) 790 return 0; 791 DELAY(10); 792 } 793 794 device_printf(sc->sc_dev, "could not lock memory\n"); 795 796 return ETIMEDOUT; 797 } 798 799 /* 800 * Release lock on NIC memory. 
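 *
 * Illustrative sketch of the usual lock/access/unlock pattern (cf.
 * wpi_read_prom_data() below); this is not additional driver code:
 *
 *	if ((error = wpi_nic_lock(sc)) != 0)
 *		return error;
 *	val = wpi_prph_read(sc, addr);
 *	wpi_nic_unlock(sc);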
801 */ 802 static __inline void 803 wpi_nic_unlock(struct wpi_softc *sc) 804 { 805 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 806 } 807 808 static __inline uint32_t 809 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 810 { 811 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 812 WPI_BARRIER_READ_WRITE(sc); 813 return WPI_READ(sc, WPI_PRPH_RDATA); 814 } 815 816 static __inline void 817 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 818 { 819 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 820 WPI_BARRIER_WRITE(sc); 821 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 822 } 823 824 static __inline void 825 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 826 { 827 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 828 } 829 830 static __inline void 831 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 832 { 833 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 834 } 835 836 static __inline void 837 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 838 const uint32_t *data, uint32_t count) 839 { 840 for (; count != 0; count--, data++, addr += 4) 841 wpi_prph_write(sc, addr, *data); 842 } 843 844 static __inline uint32_t 845 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 846 { 847 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 848 WPI_BARRIER_READ_WRITE(sc); 849 return WPI_READ(sc, WPI_MEM_RDATA); 850 } 851 852 static __inline void 853 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 854 int count) 855 { 856 for (; count > 0; count--, addr += 4) 857 *data++ = wpi_mem_read(sc, addr); 858 } 859 860 static int 861 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 862 { 863 uint8_t *out = data; 864 uint32_t val; 865 int error, ntries; 866 867 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 868 869 if ((error = wpi_nic_lock(sc)) != 0) 870 return error; 871 872 for (; count > 0; count -= 2, addr++) { 873 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 874 for (ntries = 0; ntries < 10; ntries++) { 875 val = WPI_READ(sc, WPI_EEPROM); 876 if (val & WPI_EEPROM_READ_VALID) 877 break; 878 DELAY(5); 879 } 880 if (ntries == 10) { 881 device_printf(sc->sc_dev, 882 "timeout reading ROM at 0x%x\n", addr); 883 return ETIMEDOUT; 884 } 885 *out++= val >> 16; 886 if (count > 1) 887 *out ++= val >> 24; 888 } 889 890 wpi_nic_unlock(sc); 891 892 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 893 894 return 0; 895 } 896 897 static void 898 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 899 { 900 if (error != 0) 901 return; 902 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 903 *(bus_addr_t *)arg = segs[0].ds_addr; 904 } 905 906 /* 907 * Allocates a contiguous block of dma memory of the requested size and 908 * alignment. 
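 *
 * Usage sketch, mirroring wpi_alloc_shared()/wpi_free_shared() below
 * (the shared page must be aligned on a 4KB boundary):
 *
 *	error = wpi_dma_contig_alloc(sc, &sc->shared_dma,
 *	    (void **)&sc->shared, sizeof (struct wpi_shared), 4096);
 *	...
 *	wpi_dma_contig_free(&sc->shared_dma);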
909 */ 910 static int 911 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 912 void **kvap, bus_size_t size, bus_size_t alignment) 913 { 914 int error; 915 916 dma->tag = NULL; 917 dma->size = size; 918 919 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 920 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 921 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 922 if (error != 0) 923 goto fail; 924 925 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 926 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 927 if (error != 0) 928 goto fail; 929 930 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 931 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 932 if (error != 0) 933 goto fail; 934 935 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 936 937 if (kvap != NULL) 938 *kvap = dma->vaddr; 939 940 return 0; 941 942 fail: wpi_dma_contig_free(dma); 943 return error; 944 } 945 946 static void 947 wpi_dma_contig_free(struct wpi_dma_info *dma) 948 { 949 if (dma->vaddr != NULL) { 950 bus_dmamap_sync(dma->tag, dma->map, 951 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 952 bus_dmamap_unload(dma->tag, dma->map); 953 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 954 dma->vaddr = NULL; 955 } 956 if (dma->tag != NULL) { 957 bus_dma_tag_destroy(dma->tag); 958 dma->tag = NULL; 959 } 960 } 961 962 /* 963 * Allocate a shared page between host and NIC. 964 */ 965 static int 966 wpi_alloc_shared(struct wpi_softc *sc) 967 { 968 /* Shared buffer must be aligned on a 4KB boundary. */ 969 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 970 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 971 } 972 973 static void 974 wpi_free_shared(struct wpi_softc *sc) 975 { 976 wpi_dma_contig_free(&sc->shared_dma); 977 } 978 979 /* 980 * Allocate DMA-safe memory for firmware transfer. 981 */ 982 static int 983 wpi_alloc_fwmem(struct wpi_softc *sc) 984 { 985 /* Must be aligned on a 16-byte boundary. */ 986 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 987 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 988 } 989 990 static void 991 wpi_free_fwmem(struct wpi_softc *sc) 992 { 993 wpi_dma_contig_free(&sc->fw_dma); 994 } 995 996 static int 997 wpi_alloc_rx_ring(struct wpi_softc *sc) 998 { 999 struct wpi_rx_ring *ring = &sc->rxq; 1000 bus_size_t size; 1001 int i, error; 1002 1003 ring->cur = 0; 1004 ring->update = 0; 1005 1006 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1007 1008 /* Allocate RX descriptors (16KB aligned.) */ 1009 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1010 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1011 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1012 if (error != 0) { 1013 device_printf(sc->sc_dev, 1014 "%s: could not allocate RX ring DMA memory, error %d\n", 1015 __func__, error); 1016 goto fail; 1017 } 1018 1019 /* Create RX buffer DMA tag. */ 1020 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1021 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1022 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1023 &ring->data_dmat); 1024 if (error != 0) { 1025 device_printf(sc->sc_dev, 1026 "%s: could not create RX buf DMA tag, error %d\n", 1027 __func__, error); 1028 goto fail; 1029 } 1030 1031 /* 1032 * Allocate and map RX buffers. 
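 * For each of the WPI_RX_RING_COUNT slots: create a DMA map, allocate a
 * page-sized jumbo mbuf (MJUMPAGESIZE), load the mbuf into the map and
 * store the resulting physical address in the matching RX descriptor.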
1033 */ 1034 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1035 struct wpi_rx_data *data = &ring->data[i]; 1036 bus_addr_t paddr; 1037 1038 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1039 if (error != 0) { 1040 device_printf(sc->sc_dev, 1041 "%s: could not create RX buf DMA map, error %d\n", 1042 __func__, error); 1043 goto fail; 1044 } 1045 1046 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1047 if (data->m == NULL) { 1048 device_printf(sc->sc_dev, 1049 "%s: could not allocate RX mbuf\n", __func__); 1050 error = ENOBUFS; 1051 goto fail; 1052 } 1053 1054 error = bus_dmamap_load(ring->data_dmat, data->map, 1055 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1056 &paddr, BUS_DMA_NOWAIT); 1057 if (error != 0 && error != EFBIG) { 1058 device_printf(sc->sc_dev, 1059 "%s: can't map mbuf (error %d)\n", __func__, 1060 error); 1061 goto fail; 1062 } 1063 1064 /* Set physical address of RX buffer. */ 1065 ring->desc[i] = htole32(paddr); 1066 } 1067 1068 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1069 BUS_DMASYNC_PREWRITE); 1070 1071 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1072 1073 return 0; 1074 1075 fail: wpi_free_rx_ring(sc); 1076 1077 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1078 1079 return error; 1080 } 1081 1082 static void 1083 wpi_update_rx_ring(struct wpi_softc *sc) 1084 { 1085 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1086 } 1087 1088 static void 1089 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1090 { 1091 struct wpi_rx_ring *ring = &sc->rxq; 1092 1093 if (ring->update != 0) { 1094 /* Wait for INT_WAKEUP event. */ 1095 return; 1096 } 1097 1098 WPI_TXQ_LOCK(sc); 1099 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1100 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1101 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1102 __func__); 1103 ring->update = 1; 1104 } else { 1105 wpi_update_rx_ring(sc); 1106 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1107 } 1108 WPI_TXQ_UNLOCK(sc); 1109 } 1110 1111 static void 1112 wpi_reset_rx_ring(struct wpi_softc *sc) 1113 { 1114 struct wpi_rx_ring *ring = &sc->rxq; 1115 int ntries; 1116 1117 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1118 1119 if (wpi_nic_lock(sc) == 0) { 1120 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1121 for (ntries = 0; ntries < 1000; ntries++) { 1122 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1123 WPI_FH_RX_STATUS_IDLE) 1124 break; 1125 DELAY(10); 1126 } 1127 wpi_nic_unlock(sc); 1128 } 1129 1130 ring->cur = 0; 1131 ring->update = 0; 1132 } 1133 1134 static void 1135 wpi_free_rx_ring(struct wpi_softc *sc) 1136 { 1137 struct wpi_rx_ring *ring = &sc->rxq; 1138 int i; 1139 1140 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1141 1142 wpi_dma_contig_free(&ring->desc_dma); 1143 1144 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1145 struct wpi_rx_data *data = &ring->data[i]; 1146 1147 if (data->m != NULL) { 1148 bus_dmamap_sync(ring->data_dmat, data->map, 1149 BUS_DMASYNC_POSTREAD); 1150 bus_dmamap_unload(ring->data_dmat, data->map); 1151 m_freem(data->m); 1152 data->m = NULL; 1153 } 1154 if (data->map != NULL) 1155 bus_dmamap_destroy(ring->data_dmat, data->map); 1156 } 1157 if (ring->data_dmat != NULL) { 1158 bus_dma_tag_destroy(ring->data_dmat); 1159 ring->data_dmat = NULL; 1160 } 1161 } 1162 1163 static int 1164 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) 1165 { 1166 bus_addr_t paddr; 1167 bus_size_t size; 1168 int i, error; 1169 1170 ring->qid = qid; 1171 
ring->queued = 0; 1172 ring->cur = 0; 1173 ring->pending = 0; 1174 ring->update = 0; 1175 1176 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1177 1178 /* Allocate TX descriptors (16KB aligned.) */ 1179 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1180 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1181 size, WPI_RING_DMA_ALIGN); 1182 if (error != 0) { 1183 device_printf(sc->sc_dev, 1184 "%s: could not allocate TX ring DMA memory, error %d\n", 1185 __func__, error); 1186 goto fail; 1187 } 1188 1189 /* Update shared area with ring physical address. */ 1190 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1191 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1192 BUS_DMASYNC_PREWRITE); 1193 1194 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1195 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1196 size, 4); 1197 if (error != 0) { 1198 device_printf(sc->sc_dev, 1199 "%s: could not allocate TX cmd DMA memory, error %d\n", 1200 __func__, error); 1201 goto fail; 1202 } 1203 1204 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1205 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1206 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1207 &ring->data_dmat); 1208 if (error != 0) { 1209 device_printf(sc->sc_dev, 1210 "%s: could not create TX buf DMA tag, error %d\n", 1211 __func__, error); 1212 goto fail; 1213 } 1214 1215 paddr = ring->cmd_dma.paddr; 1216 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1217 struct wpi_tx_data *data = &ring->data[i]; 1218 1219 data->cmd_paddr = paddr; 1220 paddr += sizeof (struct wpi_tx_cmd); 1221 1222 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1223 if (error != 0) { 1224 device_printf(sc->sc_dev, 1225 "%s: could not create TX buf DMA map, error %d\n", 1226 __func__, error); 1227 goto fail; 1228 } 1229 } 1230 1231 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1232 1233 return 0; 1234 1235 fail: wpi_free_tx_ring(sc, ring); 1236 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1237 return error; 1238 } 1239 1240 static void 1241 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1242 { 1243 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1244 } 1245 1246 static void 1247 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1248 { 1249 1250 if (ring->update != 0) { 1251 /* Wait for INT_WAKEUP event. */ 1252 return; 1253 } 1254 1255 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1256 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1257 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1258 __func__, ring->qid); 1259 ring->update = 1; 1260 } else { 1261 wpi_update_tx_ring(sc, ring); 1262 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1263 } 1264 } 1265 1266 static void 1267 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1268 { 1269 int i; 1270 1271 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1272 1273 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1274 struct wpi_tx_data *data = &ring->data[i]; 1275 1276 if (data->m != NULL) { 1277 bus_dmamap_sync(ring->data_dmat, data->map, 1278 BUS_DMASYNC_POSTWRITE); 1279 bus_dmamap_unload(ring->data_dmat, data->map); 1280 m_freem(data->m); 1281 data->m = NULL; 1282 } 1283 if (data->ni != NULL) { 1284 ieee80211_free_node(data->ni); 1285 data->ni = NULL; 1286 } 1287 } 1288 /* Clear TX descriptors. 
*/ 1289 memset(ring->desc, 0, ring->desc_dma.size); 1290 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1291 BUS_DMASYNC_PREWRITE); 1292 ring->queued = 0; 1293 ring->cur = 0; 1294 ring->pending = 0; 1295 ring->update = 0; 1296 } 1297 1298 static void 1299 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1300 { 1301 int i; 1302 1303 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1304 1305 wpi_dma_contig_free(&ring->desc_dma); 1306 wpi_dma_contig_free(&ring->cmd_dma); 1307 1308 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1309 struct wpi_tx_data *data = &ring->data[i]; 1310 1311 if (data->m != NULL) { 1312 bus_dmamap_sync(ring->data_dmat, data->map, 1313 BUS_DMASYNC_POSTWRITE); 1314 bus_dmamap_unload(ring->data_dmat, data->map); 1315 m_freem(data->m); 1316 } 1317 if (data->map != NULL) 1318 bus_dmamap_destroy(ring->data_dmat, data->map); 1319 } 1320 if (ring->data_dmat != NULL) { 1321 bus_dma_tag_destroy(ring->data_dmat); 1322 ring->data_dmat = NULL; 1323 } 1324 } 1325 1326 /* 1327 * Extract various information from EEPROM. 1328 */ 1329 static int 1330 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1331 { 1332 #define WPI_CHK(res) do { \ 1333 if ((error = res) != 0) \ 1334 goto fail; \ 1335 } while (0) 1336 uint8_t i; 1337 int error; 1338 1339 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1340 1341 /* Adapter has to be powered on for EEPROM access to work. */ 1342 if ((error = wpi_apm_init(sc)) != 0) { 1343 device_printf(sc->sc_dev, 1344 "%s: could not power ON adapter, error %d\n", __func__, 1345 error); 1346 return error; 1347 } 1348 1349 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1350 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1351 error = EIO; 1352 goto fail; 1353 } 1354 /* Clear HW ownership of EEPROM. */ 1355 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1356 1357 /* Read the hardware capabilities, revision and SKU type. */ 1358 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1359 sizeof(sc->cap))); 1360 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1361 sizeof(sc->rev))); 1362 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1363 sizeof(sc->type))); 1364 1365 sc->rev = le16toh(sc->rev); 1366 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1367 sc->rev, sc->type); 1368 1369 /* Read the regulatory domain (4 ASCII characters.) */ 1370 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1371 sizeof(sc->domain))); 1372 1373 /* Read MAC address. */ 1374 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1375 IEEE80211_ADDR_LEN)); 1376 1377 /* Read the list of authorized channels. */ 1378 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1379 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1380 1381 /* Read the list of TX power groups. */ 1382 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1383 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1384 1385 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1386 1387 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1388 __func__); 1389 1390 return error; 1391 #undef WPI_CHK 1392 } 1393 1394 /* 1395 * Translate EEPROM flags to net80211. 
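 *
 * For example, a channel without WPI_EEPROM_CHAN_ACTIVE becomes
 * IEEE80211_CHAN_PASSIVE, one without WPI_EEPROM_CHAN_IBSS gets
 * IEEE80211_CHAN_NOADHOC, and WPI_EEPROM_CHAN_RADAR marks the channel
 * IEEE80211_CHAN_DFS (which also implies NOADHOC and NOHOSTAP here).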
1396 */ 1397 static uint32_t 1398 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1399 { 1400 uint32_t nflags; 1401 1402 nflags = 0; 1403 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1404 nflags |= IEEE80211_CHAN_PASSIVE; 1405 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1406 nflags |= IEEE80211_CHAN_NOADHOC; 1407 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1408 nflags |= IEEE80211_CHAN_DFS; 1409 /* XXX apparently IBSS may still be marked */ 1410 nflags |= IEEE80211_CHAN_NOADHOC; 1411 } 1412 1413 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1414 if (nflags & IEEE80211_CHAN_NOADHOC) 1415 nflags |= IEEE80211_CHAN_NOHOSTAP; 1416 1417 return nflags; 1418 } 1419 1420 static void 1421 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n) 1422 { 1423 struct ieee80211com *ic = &sc->sc_ic; 1424 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1425 const struct wpi_chan_band *band = &wpi_bands[n]; 1426 struct ieee80211_channel *c; 1427 uint32_t nflags; 1428 uint8_t chan, i; 1429 1430 for (i = 0; i < band->nchan; i++) { 1431 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1432 DPRINTF(sc, WPI_DEBUG_EEPROM, 1433 "Channel Not Valid: %d, band %d\n", 1434 band->chan[i],n); 1435 continue; 1436 } 1437 1438 chan = band->chan[i]; 1439 nflags = wpi_eeprom_channel_flags(&channels[i]); 1440 1441 c = &ic->ic_channels[ic->ic_nchans++]; 1442 c->ic_ieee = chan; 1443 c->ic_maxregpower = channels[i].maxpwr; 1444 c->ic_maxpower = 2*c->ic_maxregpower; 1445 1446 if (n == 0) { /* 2GHz band */ 1447 c->ic_freq = ieee80211_ieee2mhz(chan, 1448 IEEE80211_CHAN_G); 1449 1450 /* G =>'s B is supported */ 1451 c->ic_flags = IEEE80211_CHAN_B | nflags; 1452 c = &ic->ic_channels[ic->ic_nchans++]; 1453 c[0] = c[-1]; 1454 c->ic_flags = IEEE80211_CHAN_G | nflags; 1455 } else { /* 5GHz band */ 1456 c->ic_freq = ieee80211_ieee2mhz(chan, 1457 IEEE80211_CHAN_A); 1458 1459 c->ic_flags = IEEE80211_CHAN_A | nflags; 1460 } 1461 1462 /* Save maximum allowed TX power for this channel. */ 1463 sc->maxpwr[chan] = channels[i].maxpwr; 1464 1465 DPRINTF(sc, WPI_DEBUG_EEPROM, 1466 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1467 " offset %d\n", chan, c->ic_freq, 1468 channels[i].flags, sc->maxpwr[chan], 1469 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1470 } 1471 } 1472 1473 /** 1474 * Read the eeprom to find out what channels are valid for the given 1475 * band and update net80211 with what we find. 1476 */ 1477 static int 1478 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) 1479 { 1480 struct ieee80211com *ic = &sc->sc_ic; 1481 const struct wpi_chan_band *band = &wpi_bands[n]; 1482 int error; 1483 1484 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1485 1486 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1487 band->nchan * sizeof (struct wpi_eeprom_chan)); 1488 if (error != 0) { 1489 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1490 return error; 1491 } 1492 1493 wpi_read_eeprom_band(sc, n); 1494 1495 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1496 1497 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1498 1499 return 0; 1500 } 1501 1502 static struct wpi_eeprom_chan * 1503 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1504 { 1505 int i, j; 1506 1507 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1508 for (i = 0; i < wpi_bands[j].nchan; i++) 1509 if (wpi_bands[j].chan[i] == c->ic_ieee) 1510 return &sc->eeprom_channels[j][i]; 1511 1512 return NULL; 1513 } 1514 1515 /* 1516 * Enforce flags read from EEPROM. 
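 *
 * This is installed as ic_setregdomain in wpi_attach(): channels that do
 * not exist in the EEPROM are rejected with EINVAL, all others get the
 * EEPROM-derived flags OR'ed back in via wpi_eeprom_channel_flags().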
1517 */ 1518 static int 1519 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1520 int nchan, struct ieee80211_channel chans[]) 1521 { 1522 struct wpi_softc *sc = ic->ic_softc; 1523 int i; 1524 1525 for (i = 0; i < nchan; i++) { 1526 struct ieee80211_channel *c = &chans[i]; 1527 struct wpi_eeprom_chan *channel; 1528 1529 channel = wpi_find_eeprom_channel(sc, c); 1530 if (channel == NULL) { 1531 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1532 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1533 return EINVAL; 1534 } 1535 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1536 } 1537 1538 return 0; 1539 } 1540 1541 static int 1542 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) 1543 { 1544 struct wpi_power_group *group = &sc->groups[n]; 1545 struct wpi_eeprom_group rgroup; 1546 int i, error; 1547 1548 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1549 1550 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1551 &rgroup, sizeof rgroup)) != 0) { 1552 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1553 return error; 1554 } 1555 1556 /* Save TX power group information. */ 1557 group->chan = rgroup.chan; 1558 group->maxpwr = rgroup.maxpwr; 1559 /* Retrieve temperature at which the samples were taken. */ 1560 group->temp = (int16_t)le16toh(rgroup.temp); 1561 1562 DPRINTF(sc, WPI_DEBUG_EEPROM, 1563 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1564 group->maxpwr, group->temp); 1565 1566 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1567 group->samples[i].index = rgroup.samples[i].index; 1568 group->samples[i].power = rgroup.samples[i].power; 1569 1570 DPRINTF(sc, WPI_DEBUG_EEPROM, 1571 "\tsample %d: index=%d power=%d\n", i, 1572 group->samples[i].index, group->samples[i].power); 1573 } 1574 1575 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1576 1577 return 0; 1578 } 1579 1580 static __inline uint8_t 1581 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1582 { 1583 uint8_t newid = WPI_ID_IBSS_MIN; 1584 1585 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1586 if ((sc->nodesmsk & (1 << newid)) == 0) { 1587 sc->nodesmsk |= 1 << newid; 1588 return newid; 1589 } 1590 } 1591 1592 return WPI_ID_UNDEFINED; 1593 } 1594 1595 static __inline uint8_t 1596 wpi_add_node_entry_sta(struct wpi_softc *sc) 1597 { 1598 sc->nodesmsk |= 1 << WPI_ID_BSS; 1599 1600 return WPI_ID_BSS; 1601 } 1602 1603 static __inline int 1604 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1605 { 1606 if (id == WPI_ID_UNDEFINED) 1607 return 0; 1608 1609 return (sc->nodesmsk >> id) & 1; 1610 } 1611 1612 static __inline void 1613 wpi_clear_node_table(struct wpi_softc *sc) 1614 { 1615 sc->nodesmsk = 0; 1616 } 1617 1618 static __inline void 1619 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1620 { 1621 sc->nodesmsk &= ~(1 << id); 1622 } 1623 1624 static struct ieee80211_node * 1625 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1626 { 1627 struct wpi_node *wn; 1628 1629 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1630 M_NOWAIT | M_ZERO); 1631 1632 if (wn == NULL) 1633 return NULL; 1634 1635 wn->id = WPI_ID_UNDEFINED; 1636 1637 return &wn->ni; 1638 } 1639 1640 static void 1641 wpi_node_free(struct ieee80211_node *ni) 1642 { 1643 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1644 struct wpi_node *wn = WPI_NODE(ni); 1645 1646 if (wn->id != WPI_ID_UNDEFINED) { 1647 WPI_NT_LOCK(sc); 1648 if (wpi_check_node_entry(sc, wn->id)) { 1649 wpi_del_node_entry(sc, wn->id); 1650 wpi_del_node(sc, ni); 1651 } 
1652 WPI_NT_UNLOCK(sc); 1653 } 1654 1655 sc->sc_node_free(ni); 1656 } 1657 1658 static __inline int 1659 wpi_check_bss_filter(struct wpi_softc *sc) 1660 { 1661 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1662 } 1663 1664 static void 1665 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1666 const struct ieee80211_rx_stats *rxs, 1667 int rssi, int nf) 1668 { 1669 struct ieee80211vap *vap = ni->ni_vap; 1670 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1671 struct wpi_vap *wvp = WPI_VAP(vap); 1672 uint64_t ni_tstamp, rx_tstamp; 1673 1674 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1675 1676 if (vap->iv_state == IEEE80211_S_RUN && 1677 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1678 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1679 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1680 rx_tstamp = le64toh(sc->rx_tstamp); 1681 1682 if (ni_tstamp >= rx_tstamp) { 1683 DPRINTF(sc, WPI_DEBUG_STATE, 1684 "ibss merge, tsf %ju tstamp %ju\n", 1685 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1686 (void) ieee80211_ibss_merge(ni); 1687 } 1688 } 1689 } 1690 1691 static void 1692 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1693 { 1694 struct wpi_softc *sc = arg; 1695 struct wpi_node *wn = WPI_NODE(ni); 1696 int error; 1697 1698 WPI_NT_LOCK(sc); 1699 if (wn->id != WPI_ID_UNDEFINED) { 1700 wn->id = WPI_ID_UNDEFINED; 1701 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1702 device_printf(sc->sc_dev, 1703 "%s: could not add IBSS node, error %d\n", 1704 __func__, error); 1705 } 1706 } 1707 WPI_NT_UNLOCK(sc); 1708 } 1709 1710 static void 1711 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1712 { 1713 struct ieee80211com *ic = &sc->sc_ic; 1714 1715 /* Set group keys once. */ 1716 WPI_NT_LOCK(sc); 1717 wvp->wv_gtk = 0; 1718 WPI_NT_UNLOCK(sc); 1719 1720 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1721 ieee80211_crypto_reload_keys(ic); 1722 } 1723 1724 /** 1725 * Called by net80211 when ever there is a change to 80211 state machine 1726 */ 1727 static int 1728 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1729 { 1730 struct wpi_vap *wvp = WPI_VAP(vap); 1731 struct ieee80211com *ic = vap->iv_ic; 1732 struct wpi_softc *sc = ic->ic_softc; 1733 int error = 0; 1734 1735 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1736 1737 WPI_TXQ_LOCK(sc); 1738 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1739 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1740 WPI_TXQ_UNLOCK(sc); 1741 1742 return ENXIO; 1743 } 1744 WPI_TXQ_UNLOCK(sc); 1745 1746 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1747 ieee80211_state_name[vap->iv_state], 1748 ieee80211_state_name[nstate]); 1749 1750 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1751 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1752 device_printf(sc->sc_dev, 1753 "%s: could not set power saving level\n", 1754 __func__); 1755 return error; 1756 } 1757 1758 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1759 } 1760 1761 switch (nstate) { 1762 case IEEE80211_S_SCAN: 1763 WPI_RXON_LOCK(sc); 1764 if (wpi_check_bss_filter(sc) != 0) { 1765 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1766 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1767 device_printf(sc->sc_dev, 1768 "%s: could not send RXON\n", __func__); 1769 } 1770 } 1771 WPI_RXON_UNLOCK(sc); 1772 break; 1773 1774 case IEEE80211_S_ASSOC: 1775 if (vap->iv_state != IEEE80211_S_RUN) 1776 break; 1777 /* FALLTHROUGH */ 1778 case IEEE80211_S_AUTH: 1779 /* 1780 * NB: do not 
optimize AUTH -> AUTH state transmission - 1781 * this will break powersave with non-QoS AP! 1782 */ 1783 1784 /* 1785 * The node must be registered in the firmware before auth. 1786 * Also the associd must be cleared on RUN -> ASSOC 1787 * transitions. 1788 */ 1789 if ((error = wpi_auth(sc, vap)) != 0) { 1790 device_printf(sc->sc_dev, 1791 "%s: could not move to AUTH state, error %d\n", 1792 __func__, error); 1793 } 1794 break; 1795 1796 case IEEE80211_S_RUN: 1797 /* 1798 * RUN -> RUN transition: 1799 * STA mode: Just restart the timers. 1800 * IBSS mode: Process IBSS merge. 1801 */ 1802 if (vap->iv_state == IEEE80211_S_RUN) { 1803 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1804 WPI_RXON_LOCK(sc); 1805 wpi_calib_timeout(sc); 1806 WPI_RXON_UNLOCK(sc); 1807 break; 1808 } else { 1809 /* 1810 * Drop the BSS_FILTER bit 1811 * (there is no another way to change bssid). 1812 */ 1813 WPI_RXON_LOCK(sc); 1814 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1815 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1816 device_printf(sc->sc_dev, 1817 "%s: could not send RXON\n", 1818 __func__); 1819 } 1820 WPI_RXON_UNLOCK(sc); 1821 1822 /* Restore all what was lost. */ 1823 wpi_restore_node_table(sc, wvp); 1824 1825 /* XXX set conditionally? */ 1826 wpi_updateedca(ic); 1827 } 1828 } 1829 1830 /* 1831 * !RUN -> RUN requires setting the association id 1832 * which is done with a firmware cmd. We also defer 1833 * starting the timers until that work is done. 1834 */ 1835 if ((error = wpi_run(sc, vap)) != 0) { 1836 device_printf(sc->sc_dev, 1837 "%s: could not move to RUN state\n", __func__); 1838 } 1839 break; 1840 1841 default: 1842 break; 1843 } 1844 if (error != 0) { 1845 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1846 return error; 1847 } 1848 1849 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1850 1851 return wvp->wv_newstate(vap, nstate, arg); 1852 } 1853 1854 static void 1855 wpi_calib_timeout(void *arg) 1856 { 1857 struct wpi_softc *sc = arg; 1858 1859 if (wpi_check_bss_filter(sc) == 0) 1860 return; 1861 1862 wpi_power_calibration(sc); 1863 1864 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1865 } 1866 1867 static __inline uint8_t 1868 rate2plcp(const uint8_t rate) 1869 { 1870 switch (rate) { 1871 case 12: return 0xd; 1872 case 18: return 0xf; 1873 case 24: return 0x5; 1874 case 36: return 0x7; 1875 case 48: return 0x9; 1876 case 72: return 0xb; 1877 case 96: return 0x1; 1878 case 108: return 0x3; 1879 case 2: return 10; 1880 case 4: return 20; 1881 case 11: return 55; 1882 case 22: return 110; 1883 default: return 0; 1884 } 1885 } 1886 1887 static __inline uint8_t 1888 plcp2rate(const uint8_t plcp) 1889 { 1890 switch (plcp) { 1891 case 0xd: return 12; 1892 case 0xf: return 18; 1893 case 0x5: return 24; 1894 case 0x7: return 36; 1895 case 0x9: return 48; 1896 case 0xb: return 72; 1897 case 0x1: return 96; 1898 case 0x3: return 108; 1899 case 10: return 2; 1900 case 20: return 4; 1901 case 55: return 11; 1902 case 110: return 22; 1903 default: return 0; 1904 } 1905 } 1906 1907 /* Quickly determine if a given rate is CCK or OFDM. 
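 *
 * Rates are expressed in units of 500 kb/s, so 12 is the lowest OFDM rate
 * (6 Mb/s) and 22 is the highest CCK rate (11 Mb/s); for example,
 * WPI_RATE_IS_OFDM(22) is false while WPI_RATE_IS_OFDM(48) is true.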
*/ 1908 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1909 1910 static void 1911 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1912 struct wpi_rx_data *data) 1913 { 1914 struct ieee80211com *ic = &sc->sc_ic; 1915 struct wpi_rx_ring *ring = &sc->rxq; 1916 struct wpi_rx_stat *stat; 1917 struct wpi_rx_head *head; 1918 struct wpi_rx_tail *tail; 1919 struct ieee80211_frame *wh; 1920 struct ieee80211_node *ni; 1921 struct mbuf *m, *m1; 1922 bus_addr_t paddr; 1923 uint32_t flags; 1924 uint16_t len; 1925 int error; 1926 1927 stat = (struct wpi_rx_stat *)(desc + 1); 1928 1929 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1930 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1931 goto fail1; 1932 } 1933 1934 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1935 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1936 len = le16toh(head->len); 1937 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1938 flags = le32toh(tail->flags); 1939 1940 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1941 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1942 le32toh(desc->len), len, (int8_t)stat->rssi, 1943 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1944 1945 /* Discard frames with a bad FCS early. */ 1946 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1947 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1948 __func__, flags); 1949 goto fail1; 1950 } 1951 /* Discard frames that are too short. */ 1952 if (len < sizeof (struct ieee80211_frame_ack)) { 1953 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1954 __func__, len); 1955 goto fail1; 1956 } 1957 1958 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1959 if (__predict_false(m1 == NULL)) { 1960 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1961 __func__); 1962 goto fail1; 1963 } 1964 bus_dmamap_unload(ring->data_dmat, data->map); 1965 1966 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1967 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1968 if (__predict_false(error != 0 && error != EFBIG)) { 1969 device_printf(sc->sc_dev, 1970 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1971 m_freem(m1); 1972 1973 /* Try to reload the old mbuf. */ 1974 error = bus_dmamap_load(ring->data_dmat, data->map, 1975 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1976 &paddr, BUS_DMA_NOWAIT); 1977 if (error != 0 && error != EFBIG) { 1978 panic("%s: could not load old RX mbuf", __func__); 1979 } 1980 /* Physical address may have changed. */ 1981 ring->desc[ring->cur] = htole32(paddr); 1982 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1983 BUS_DMASYNC_PREWRITE); 1984 goto fail1; 1985 } 1986 1987 m = data->m; 1988 data->m = m1; 1989 /* Update RX descriptor. */ 1990 ring->desc[ring->cur] = htole32(paddr); 1991 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1992 BUS_DMASYNC_PREWRITE); 1993 1994 /* Finalize mbuf. */ 1995 m->m_data = (caddr_t)(head + 1); 1996 m->m_pkthdr.len = m->m_len = len; 1997 1998 /* Grab a reference to the source node. */ 1999 wh = mtod(m, struct ieee80211_frame *); 2000 2001 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2002 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2003 /* Check whether decryption was successful or not. 
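 * The RX tail flags carry the hardware CCMP decryption status; frames
 * that fail the check below are dropped and counted as input errors.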
*/ 2004 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2005 DPRINTF(sc, WPI_DEBUG_RECV, 2006 "CCMP decryption failed 0x%x\n", flags); 2007 goto fail2; 2008 } 2009 m->m_flags |= M_WEP; 2010 } 2011 2012 if (len >= sizeof(struct ieee80211_frame_min)) 2013 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2014 else 2015 ni = NULL; 2016 2017 sc->rx_tstamp = tail->tstamp; 2018 2019 if (ieee80211_radiotap_active(ic)) { 2020 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2021 2022 tap->wr_flags = 0; 2023 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2024 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2025 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2026 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2027 tap->wr_tsft = tail->tstamp; 2028 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2029 tap->wr_rate = plcp2rate(head->plcp); 2030 } 2031 2032 WPI_UNLOCK(sc); 2033 2034 /* Send the frame to the 802.11 layer. */ 2035 if (ni != NULL) { 2036 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2037 /* Node is no longer needed. */ 2038 ieee80211_free_node(ni); 2039 } else 2040 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2041 2042 WPI_LOCK(sc); 2043 2044 return; 2045 2046 fail2: m_freem(m); 2047 2048 fail1: counter_u64_add(ic->ic_ierrors, 1); 2049 } 2050 2051 static void 2052 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2053 struct wpi_rx_data *data) 2054 { 2055 /* Ignore */ 2056 } 2057 2058 static void 2059 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2060 { 2061 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2062 struct wpi_tx_data *data = &ring->data[desc->idx]; 2063 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2064 struct mbuf *m; 2065 struct ieee80211_node *ni; 2066 struct ieee80211vap *vap; 2067 struct ieee80211com *ic; 2068 uint32_t status = le32toh(stat->status); 2069 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2070 2071 KASSERT(data->ni != NULL, ("no node")); 2072 KASSERT(data->m != NULL, ("no mbuf")); 2073 2074 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2075 2076 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2077 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2078 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2079 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2080 2081 /* Unmap and free mbuf. */ 2082 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2083 bus_dmamap_unload(ring->data_dmat, data->map); 2084 m = data->m, data->m = NULL; 2085 ni = data->ni, data->ni = NULL; 2086 vap = ni->ni_vap; 2087 ic = vap->iv_ic; 2088 2089 /* 2090 * Update rate control statistics for the node. 2091 */ 2092 if (status & WPI_TX_STATUS_FAIL) { 2093 ieee80211_ratectl_tx_complete(vap, ni, 2094 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2095 } else 2096 ieee80211_ratectl_tx_complete(vap, ni, 2097 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2098 2099 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2100 2101 WPI_TXQ_STATE_LOCK(sc); 2102 if (--ring->queued > 0) 2103 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2104 else 2105 callout_stop(&sc->tx_timeout); 2106 WPI_TXQ_STATE_UNLOCK(sc); 2107 2108 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2109 } 2110 2111 /* 2112 * Process a "command done" firmware notification. This is where we wakeup 2113 * processes waiting for a synchronous command completion. 
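 * Synchronous callers block in wpi_cmd(); the wakeup() below releases
 * them once the firmware has acknowledged the command.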
2114 */ 2115 static void 2116 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2117 { 2118 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2119 struct wpi_tx_data *data; 2120 struct wpi_tx_cmd *cmd; 2121 2122 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2123 "type %s len %d\n", desc->qid, desc->idx, 2124 desc->flags, wpi_cmd_str(desc->type), 2125 le32toh(desc->len)); 2126 2127 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2128 return; /* Not a command ack. */ 2129 2130 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2131 2132 data = &ring->data[desc->idx]; 2133 cmd = &ring->cmd[desc->idx]; 2134 2135 /* If the command was mapped in an mbuf, free it. */ 2136 if (data->m != NULL) { 2137 bus_dmamap_sync(ring->data_dmat, data->map, 2138 BUS_DMASYNC_POSTWRITE); 2139 bus_dmamap_unload(ring->data_dmat, data->map); 2140 m_freem(data->m); 2141 data->m = NULL; 2142 } 2143 2144 wakeup(cmd); 2145 2146 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2147 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2148 2149 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2150 BUS_DMASYNC_POSTREAD); 2151 2152 WPI_TXQ_LOCK(sc); 2153 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2154 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2155 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2156 } else { 2157 sc->sc_update_rx_ring = wpi_update_rx_ring; 2158 sc->sc_update_tx_ring = wpi_update_tx_ring; 2159 } 2160 WPI_TXQ_UNLOCK(sc); 2161 } 2162 } 2163 2164 static void 2165 wpi_notif_intr(struct wpi_softc *sc) 2166 { 2167 struct ieee80211com *ic = &sc->sc_ic; 2168 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2169 uint32_t hw; 2170 2171 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2172 BUS_DMASYNC_POSTREAD); 2173 2174 hw = le32toh(sc->shared->next) & 0xfff; 2175 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2176 2177 while (sc->rxq.cur != hw) { 2178 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2179 2180 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2181 struct wpi_rx_desc *desc; 2182 2183 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2184 BUS_DMASYNC_POSTREAD); 2185 desc = mtod(data->m, struct wpi_rx_desc *); 2186 2187 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2188 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2189 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2190 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2191 2192 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2193 /* Reply to a command. */ 2194 wpi_cmd_done(sc, desc); 2195 } 2196 2197 switch (desc->type) { 2198 case WPI_RX_DONE: 2199 /* An 802.11 frame has been received. */ 2200 wpi_rx_done(sc, desc, data); 2201 2202 if (__predict_false(sc->sc_running == 0)) { 2203 /* wpi_stop() was called. */ 2204 return; 2205 } 2206 2207 break; 2208 2209 case WPI_TX_DONE: 2210 /* An 802.11 frame has been transmitted. 
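 * wpi_tx_done() updates rate control statistics and rearms or stops
 * the TX watchdog depending on whether frames remain queued.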
*/ 2211 wpi_tx_done(sc, desc); 2212 break; 2213 2214 case WPI_RX_STATISTICS: 2215 case WPI_BEACON_STATISTICS: 2216 wpi_rx_statistics(sc, desc, data); 2217 break; 2218 2219 case WPI_BEACON_MISSED: 2220 { 2221 struct wpi_beacon_missed *miss = 2222 (struct wpi_beacon_missed *)(desc + 1); 2223 uint32_t expected, misses, received, threshold; 2224 2225 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2226 BUS_DMASYNC_POSTREAD); 2227 2228 misses = le32toh(miss->consecutive); 2229 expected = le32toh(miss->expected); 2230 received = le32toh(miss->received); 2231 threshold = MAX(2, vap->iv_bmissthreshold); 2232 2233 DPRINTF(sc, WPI_DEBUG_BMISS, 2234 "%s: beacons missed %u(%u) (received %u/%u)\n", 2235 __func__, misses, le32toh(miss->total), received, 2236 expected); 2237 2238 if (misses >= threshold || 2239 (received == 0 && expected >= threshold)) { 2240 WPI_RXON_LOCK(sc); 2241 if (callout_pending(&sc->scan_timeout)) { 2242 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2243 0, 1); 2244 } 2245 WPI_RXON_UNLOCK(sc); 2246 if (vap->iv_state == IEEE80211_S_RUN && 2247 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2248 ieee80211_beacon_miss(ic); 2249 } 2250 2251 break; 2252 } 2253 #ifdef WPI_DEBUG 2254 case WPI_BEACON_SENT: 2255 { 2256 struct wpi_tx_stat *stat = 2257 (struct wpi_tx_stat *)(desc + 1); 2258 uint64_t *tsf = (uint64_t *)(stat + 1); 2259 uint32_t *mode = (uint32_t *)(tsf + 1); 2260 2261 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2262 BUS_DMASYNC_POSTREAD); 2263 2264 DPRINTF(sc, WPI_DEBUG_BEACON, 2265 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2266 "duration %u, status %x, tsf %ju, mode %x\n", 2267 stat->rtsfailcnt, stat->ackfailcnt, 2268 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2269 le32toh(stat->status), le64toh(*tsf), 2270 le32toh(*mode)); 2271 2272 break; 2273 } 2274 #endif 2275 case WPI_UC_READY: 2276 { 2277 struct wpi_ucode_info *uc = 2278 (struct wpi_ucode_info *)(desc + 1); 2279 2280 /* The microcontroller is ready. */ 2281 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2282 BUS_DMASYNC_POSTREAD); 2283 DPRINTF(sc, WPI_DEBUG_RESET, 2284 "microcode alive notification version=%d.%d " 2285 "subtype=%x alive=%x\n", uc->major, uc->minor, 2286 uc->subtype, le32toh(uc->valid)); 2287 2288 if (le32toh(uc->valid) != 1) { 2289 device_printf(sc->sc_dev, 2290 "microcontroller initialization failed\n"); 2291 wpi_stop_locked(sc); 2292 return; 2293 } 2294 /* Save the address of the error log in SRAM. 
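 * wpi_fatal_intr() reads this SRAM region back to dump the log if the
 * firmware later reports a fatal error.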
*/ 2295 sc->errptr = le32toh(uc->errptr); 2296 break; 2297 } 2298 case WPI_STATE_CHANGED: 2299 { 2300 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2301 BUS_DMASYNC_POSTREAD); 2302 2303 uint32_t *status = (uint32_t *)(desc + 1); 2304 2305 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2306 le32toh(*status)); 2307 2308 if (le32toh(*status) & 1) { 2309 WPI_NT_LOCK(sc); 2310 wpi_clear_node_table(sc); 2311 WPI_NT_UNLOCK(sc); 2312 taskqueue_enqueue(sc->sc_tq, 2313 &sc->sc_radiooff_task); 2314 return; 2315 } 2316 break; 2317 } 2318 #ifdef WPI_DEBUG 2319 case WPI_START_SCAN: 2320 { 2321 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2322 BUS_DMASYNC_POSTREAD); 2323 2324 struct wpi_start_scan *scan = 2325 (struct wpi_start_scan *)(desc + 1); 2326 DPRINTF(sc, WPI_DEBUG_SCAN, 2327 "%s: scanning channel %d status %x\n", 2328 __func__, scan->chan, le32toh(scan->status)); 2329 2330 break; 2331 } 2332 #endif 2333 case WPI_STOP_SCAN: 2334 { 2335 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2336 BUS_DMASYNC_POSTREAD); 2337 2338 struct wpi_stop_scan *scan = 2339 (struct wpi_stop_scan *)(desc + 1); 2340 2341 DPRINTF(sc, WPI_DEBUG_SCAN, 2342 "scan finished nchan=%d status=%d chan=%d\n", 2343 scan->nchan, scan->status, scan->chan); 2344 2345 WPI_RXON_LOCK(sc); 2346 callout_stop(&sc->scan_timeout); 2347 WPI_RXON_UNLOCK(sc); 2348 if (scan->status == WPI_SCAN_ABORTED) 2349 ieee80211_cancel_scan(vap); 2350 else 2351 ieee80211_scan_next(vap); 2352 break; 2353 } 2354 } 2355 2356 if (sc->rxq.cur % 8 == 0) { 2357 /* Tell the firmware what we have processed. */ 2358 sc->sc_update_rx_ring(sc); 2359 } 2360 } 2361 } 2362 2363 /* 2364 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2365 * from power-down sleep mode. 2366 */ 2367 static void 2368 wpi_wakeup_intr(struct wpi_softc *sc) 2369 { 2370 int qid; 2371 2372 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2373 "%s: ucode wakeup from power-down sleep\n", __func__); 2374 2375 /* Wakeup RX and TX rings. 
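 * Ring index updates that were postponed (via the per-ring update
 * flags) while the microcontroller was in power-down sleep are
 * replayed here.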
*/ 2376 if (sc->rxq.update) { 2377 sc->rxq.update = 0; 2378 wpi_update_rx_ring(sc); 2379 } 2380 WPI_TXQ_LOCK(sc); 2381 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2382 struct wpi_tx_ring *ring = &sc->txq[qid]; 2383 2384 if (ring->update) { 2385 ring->update = 0; 2386 wpi_update_tx_ring(sc, ring); 2387 } 2388 } 2389 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2390 WPI_TXQ_UNLOCK(sc); 2391 } 2392 2393 /* 2394 * This function prints firmware registers 2395 */ 2396 #ifdef WPI_DEBUG 2397 static void 2398 wpi_debug_registers(struct wpi_softc *sc) 2399 { 2400 size_t i; 2401 static const uint32_t csr_tbl[] = { 2402 WPI_HW_IF_CONFIG, 2403 WPI_INT, 2404 WPI_INT_MASK, 2405 WPI_FH_INT, 2406 WPI_GPIO_IN, 2407 WPI_RESET, 2408 WPI_GP_CNTRL, 2409 WPI_EEPROM, 2410 WPI_EEPROM_GP, 2411 WPI_GIO, 2412 WPI_UCODE_GP1, 2413 WPI_UCODE_GP2, 2414 WPI_GIO_CHICKEN, 2415 WPI_ANA_PLL, 2416 WPI_DBG_HPET_MEM, 2417 }; 2418 static const uint32_t prph_tbl[] = { 2419 WPI_APMG_CLK_CTRL, 2420 WPI_APMG_PS, 2421 WPI_APMG_PCI_STT, 2422 WPI_APMG_RFKILL, 2423 }; 2424 2425 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2426 2427 for (i = 0; i < nitems(csr_tbl); i++) { 2428 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2429 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2430 2431 if ((i + 1) % 2 == 0) 2432 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2433 } 2434 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2435 2436 if (wpi_nic_lock(sc) == 0) { 2437 for (i = 0; i < nitems(prph_tbl); i++) { 2438 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2439 wpi_get_prph_string(prph_tbl[i]), 2440 wpi_prph_read(sc, prph_tbl[i])); 2441 2442 if ((i + 1) % 2 == 0) 2443 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2444 } 2445 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2446 wpi_nic_unlock(sc); 2447 } else { 2448 DPRINTF(sc, WPI_DEBUG_REGISTER, 2449 "Cannot access internal registers.\n"); 2450 } 2451 } 2452 #endif 2453 2454 /* 2455 * Dump the error log of the firmware when a firmware panic occurs. Although 2456 * we can't debug the firmware because it is neither open source nor free, it 2457 * can help us to identify certain classes of problems. 2458 */ 2459 static void 2460 wpi_fatal_intr(struct wpi_softc *sc) 2461 { 2462 struct wpi_fw_dump dump; 2463 uint32_t i, offset, count; 2464 2465 /* Check that the error log address is valid. */ 2466 if (sc->errptr < WPI_FW_DATA_BASE || 2467 sc->errptr + sizeof (dump) > 2468 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2469 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2470 sc->errptr); 2471 return; 2472 } 2473 if (wpi_nic_lock(sc) != 0) { 2474 printf("%s: could not read firmware error log\n", __func__); 2475 return; 2476 } 2477 /* Read number of entries in the log. */ 2478 count = wpi_mem_read(sc, sc->errptr); 2479 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2480 printf("%s: invalid count field (count = %u)\n", __func__, 2481 count); 2482 wpi_nic_unlock(sc); 2483 return; 2484 } 2485 /* Skip "count" field. */ 2486 offset = sc->errptr + sizeof (uint32_t); 2487 printf("firmware error log (count = %u):\n", count); 2488 for (i = 0; i < count; i++) { 2489 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2490 sizeof (dump) / sizeof (uint32_t)); 2491 2492 printf(" error type = \"%s\" (0x%08X)\n", 2493 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2494 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2495 dump.desc); 2496 printf(" error data = 0x%08X\n", 2497 dump.data); 2498 printf(" branch link = 0x%08X%08X\n", 2499 dump.blink[0], dump.blink[1]); 2500 printf(" interrupt link = 0x%08X%08X\n", 2501 dump.ilink[0], dump.ilink[1]); 2502 printf(" time = %u\n", dump.time); 2503 2504 offset += sizeof (dump); 2505 } 2506 wpi_nic_unlock(sc); 2507 /* Dump driver status (TX and RX rings) while we're here. */ 2508 printf("driver status:\n"); 2509 WPI_TXQ_LOCK(sc); 2510 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2511 struct wpi_tx_ring *ring = &sc->txq[i]; 2512 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2513 i, ring->qid, ring->cur, ring->queued); 2514 } 2515 WPI_TXQ_UNLOCK(sc); 2516 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2517 } 2518 2519 static void 2520 wpi_intr(void *arg) 2521 { 2522 struct wpi_softc *sc = arg; 2523 uint32_t r1, r2; 2524 2525 WPI_LOCK(sc); 2526 2527 /* Disable interrupts. */ 2528 WPI_WRITE(sc, WPI_INT_MASK, 0); 2529 2530 r1 = WPI_READ(sc, WPI_INT); 2531 2532 if (__predict_false(r1 == 0xffffffff || 2533 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2534 goto end; /* Hardware gone! */ 2535 2536 r2 = WPI_READ(sc, WPI_FH_INT); 2537 2538 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2539 r1, r2); 2540 2541 if (r1 == 0 && r2 == 0) 2542 goto done; /* Interrupt not for us. */ 2543 2544 /* Acknowledge interrupts. */ 2545 WPI_WRITE(sc, WPI_INT, r1); 2546 WPI_WRITE(sc, WPI_FH_INT, r2); 2547 2548 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2549 device_printf(sc->sc_dev, "fatal firmware error\n"); 2550 #ifdef WPI_DEBUG 2551 wpi_debug_registers(sc); 2552 #endif 2553 wpi_fatal_intr(sc); 2554 DPRINTF(sc, WPI_DEBUG_HW, 2555 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2556 "(Hardware Error)"); 2557 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2558 goto end; 2559 } 2560 2561 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2562 (r2 & WPI_FH_INT_RX)) 2563 wpi_notif_intr(sc); 2564 2565 if (r1 & WPI_INT_ALIVE) 2566 wakeup(sc); /* Firmware is alive. */ 2567 2568 if (r1 & WPI_INT_WAKEUP) 2569 wpi_wakeup_intr(sc); 2570 2571 done: 2572 /* Re-enable interrupts. 
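 * but only while the interface is still marked running; otherwise the
 * interrupt mask is left cleared.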
*/ 2573 if (__predict_true(sc->sc_running)) 2574 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2575 2576 end: WPI_UNLOCK(sc); 2577 } 2578 2579 static void 2580 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) 2581 { 2582 struct wpi_tx_ring *ring; 2583 struct wpi_tx_data *data; 2584 uint8_t cur; 2585 2586 WPI_TXQ_LOCK(sc); 2587 ring = &sc->txq[ac]; 2588 2589 while (ring->pending != 0) { 2590 ring->pending--; 2591 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2592 data = &ring->data[cur]; 2593 2594 bus_dmamap_sync(ring->data_dmat, data->map, 2595 BUS_DMASYNC_POSTWRITE); 2596 bus_dmamap_unload(ring->data_dmat, data->map); 2597 m_freem(data->m); 2598 data->m = NULL; 2599 2600 ieee80211_node_decref(data->ni); 2601 data->ni = NULL; 2602 } 2603 2604 WPI_TXQ_UNLOCK(sc); 2605 } 2606 2607 static int 2608 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2609 { 2610 struct ieee80211_frame *wh; 2611 struct wpi_tx_cmd *cmd; 2612 struct wpi_tx_data *data; 2613 struct wpi_tx_desc *desc; 2614 struct wpi_tx_ring *ring; 2615 struct mbuf *m1; 2616 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2617 uint8_t cur, pad; 2618 uint16_t hdrlen; 2619 int error, i, nsegs, totlen, frag; 2620 2621 WPI_TXQ_LOCK(sc); 2622 2623 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2624 2625 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2626 2627 if (__predict_false(sc->sc_running == 0)) { 2628 /* wpi_stop() was called */ 2629 error = ENETDOWN; 2630 goto end; 2631 } 2632 2633 wh = mtod(buf->m, struct ieee80211_frame *); 2634 hdrlen = ieee80211_anyhdrsize(wh); 2635 totlen = buf->m->m_pkthdr.len; 2636 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); 2637 2638 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2639 error = EINVAL; 2640 goto end; 2641 } 2642 2643 if (hdrlen & 3) { 2644 /* First segment length must be a multiple of 4. */ 2645 pad = 4 - (hdrlen & 3); 2646 } else 2647 pad = 0; 2648 2649 ring = &sc->txq[buf->ac]; 2650 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2651 desc = &ring->desc[cur]; 2652 data = &ring->data[cur]; 2653 2654 /* Prepare TX firmware command. */ 2655 cmd = &ring->cmd[cur]; 2656 cmd->code = buf->code; 2657 cmd->flags = 0; 2658 cmd->qid = ring->qid; 2659 cmd->idx = cur; 2660 2661 memcpy(cmd->data, buf->data, buf->size); 2662 2663 /* Save and trim IEEE802.11 header. */ 2664 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2665 m_adj(buf->m, hdrlen); 2666 2667 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2668 segs, &nsegs, BUS_DMA_NOWAIT); 2669 if (error != 0 && error != EFBIG) { 2670 device_printf(sc->sc_dev, 2671 "%s: can't map mbuf (error %d)\n", __func__, error); 2672 goto end; 2673 } 2674 if (error != 0) { 2675 /* Too many DMA segments, linearize mbuf. */ 2676 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2677 if (m1 == NULL) { 2678 device_printf(sc->sc_dev, 2679 "%s: could not defrag mbuf\n", __func__); 2680 error = ENOBUFS; 2681 goto end; 2682 } 2683 buf->m = m1; 2684 2685 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2686 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2687 if (__predict_false(error != 0)) { 2688 /* XXX fix this (applicable to the iwn(4) too) */ 2689 /* 2690 * NB: Do not return error; 2691 * original mbuf does not exist anymore. 
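 * The chain was already replaced by m_collapse(), so free it here and
 * report success; for data queues the failure is charged to the
 * interface output error counter instead.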
2692 */ 2693 device_printf(sc->sc_dev, 2694 "%s: can't map mbuf (error %d)\n", __func__, 2695 error); 2696 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2697 if_inc_counter(buf->ni->ni_vap->iv_ifp, 2698 IFCOUNTER_OERRORS, 1); 2699 if (!frag) 2700 ieee80211_free_node(buf->ni); 2701 } 2702 m_freem(buf->m); 2703 error = 0; 2704 goto end; 2705 } 2706 } 2707 2708 KASSERT(nsegs < WPI_MAX_SCATTER, 2709 ("too many DMA segments, nsegs (%d) should be less than %d", 2710 nsegs, WPI_MAX_SCATTER)); 2711 2712 data->m = buf->m; 2713 data->ni = buf->ni; 2714 2715 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2716 __func__, ring->qid, cur, totlen, nsegs); 2717 2718 /* Fill TX descriptor. */ 2719 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2720 /* First DMA segment is used by the TX command. */ 2721 desc->segs[0].addr = htole32(data->cmd_paddr); 2722 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2723 /* Other DMA segments are for data payload. */ 2724 seg = &segs[0]; 2725 for (i = 1; i <= nsegs; i++) { 2726 desc->segs[i].addr = htole32(seg->ds_addr); 2727 desc->segs[i].len = htole32(seg->ds_len); 2728 seg++; 2729 } 2730 2731 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2732 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2733 BUS_DMASYNC_PREWRITE); 2734 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2735 BUS_DMASYNC_PREWRITE); 2736 2737 ring->pending += 1; 2738 2739 if (!frag) { 2740 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2741 WPI_TXQ_STATE_LOCK(sc); 2742 ring->queued += ring->pending; 2743 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, 2744 sc); 2745 WPI_TXQ_STATE_UNLOCK(sc); 2746 } 2747 2748 /* Kick TX ring. */ 2749 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2750 ring->pending = 0; 2751 sc->sc_update_tx_ring(sc, ring); 2752 } else 2753 ieee80211_node_incref(data->ni); 2754 2755 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 2756 __func__); 2757 2758 WPI_TXQ_UNLOCK(sc); 2759 2760 return (error); 2761 } 2762 2763 /* 2764 * Construct the data packet for a transmit buffer. 2765 */ 2766 static int 2767 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2768 { 2769 const struct ieee80211_txparam *tp; 2770 struct ieee80211vap *vap = ni->ni_vap; 2771 struct ieee80211com *ic = ni->ni_ic; 2772 struct wpi_node *wn = WPI_NODE(ni); 2773 struct ieee80211_channel *chan; 2774 struct ieee80211_frame *wh; 2775 struct ieee80211_key *k = NULL; 2776 struct wpi_buf tx_data; 2777 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2778 uint32_t flags; 2779 uint16_t ac, qos; 2780 uint8_t tid, type, rate; 2781 int swcrypt, ismcast, totlen; 2782 2783 wh = mtod(m, struct ieee80211_frame *); 2784 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2785 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2786 swcrypt = 1; 2787 2788 /* Select EDCA Access Category and TX ring for this frame. */ 2789 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2790 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2791 tid = qos & IEEE80211_QOS_TID; 2792 } else { 2793 qos = 0; 2794 tid = 0; 2795 } 2796 ac = M_WME_GETAC(m); 2797 2798 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2799 ni->ni_chan : ic->ic_curchan; 2800 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2801 2802 /* Choose a TX rate index. 
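 * Management, multicast and EAPOL frames use the fixed rates from the
 * current txparms; other unicast data asks the rate control module
 * unless a fixed unicast rate has been configured.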
*/ 2803 if (type == IEEE80211_FC0_TYPE_MGT) 2804 rate = tp->mgmtrate; 2805 else if (ismcast) 2806 rate = tp->mcastrate; 2807 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2808 rate = tp->ucastrate; 2809 else if (m->m_flags & M_EAPOL) 2810 rate = tp->mgmtrate; 2811 else { 2812 /* XXX pass pktlen */ 2813 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2814 rate = ni->ni_txrate; 2815 } 2816 2817 /* Encrypt the frame if need be. */ 2818 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2819 /* Retrieve key for TX. */ 2820 k = ieee80211_crypto_encap(ni, m); 2821 if (k == NULL) 2822 return (ENOBUFS); 2823 2824 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2825 2826 /* 802.11 header may have moved. */ 2827 wh = mtod(m, struct ieee80211_frame *); 2828 } 2829 totlen = m->m_pkthdr.len; 2830 2831 if (ieee80211_radiotap_active_vap(vap)) { 2832 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2833 2834 tap->wt_flags = 0; 2835 tap->wt_rate = rate; 2836 if (k != NULL) 2837 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2838 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2839 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2840 2841 ieee80211_radiotap_tx(vap, m); 2842 } 2843 2844 flags = 0; 2845 if (!ismcast) { 2846 /* Unicast frame, check if an ACK is expected. */ 2847 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2848 IEEE80211_QOS_ACKPOLICY_NOACK) 2849 flags |= WPI_TX_NEED_ACK; 2850 } 2851 2852 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2853 flags |= WPI_TX_AUTO_SEQ; 2854 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2855 flags |= WPI_TX_MORE_FRAG; 2856 2857 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2858 if (!ismcast) { 2859 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2860 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2861 flags |= WPI_TX_NEED_RTS; 2862 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2863 WPI_RATE_IS_OFDM(rate)) { 2864 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2865 flags |= WPI_TX_NEED_CTS; 2866 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2867 flags |= WPI_TX_NEED_RTS; 2868 } 2869 2870 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2871 flags |= WPI_TX_FULL_TXOP; 2872 } 2873 2874 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2875 if (type == IEEE80211_FC0_TYPE_MGT) { 2876 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2877 2878 /* Tell HW to set timestamp in probe responses. 
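 * (Re)association requests are also given a slightly longer firmware
 * timeout than other management frames.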
*/ 2879 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2880 flags |= WPI_TX_INSERT_TSTAMP; 2881 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2882 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2883 tx->timeout = htole16(3); 2884 else 2885 tx->timeout = htole16(2); 2886 } 2887 2888 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2889 tx->id = WPI_ID_BROADCAST; 2890 else { 2891 if (wn->id == WPI_ID_UNDEFINED) { 2892 device_printf(sc->sc_dev, 2893 "%s: undefined node id\n", __func__); 2894 return (EINVAL); 2895 } 2896 2897 tx->id = wn->id; 2898 } 2899 2900 if (!swcrypt) { 2901 switch (k->wk_cipher->ic_cipher) { 2902 case IEEE80211_CIPHER_AES_CCM: 2903 tx->security = WPI_CIPHER_CCMP; 2904 break; 2905 2906 default: 2907 break; 2908 } 2909 2910 memcpy(tx->key, k->wk_key, k->wk_keylen); 2911 } 2912 2913 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 2914 struct mbuf *next = m->m_nextpkt; 2915 2916 tx->lnext = htole16(next->m_pkthdr.len); 2917 tx->fnext = htole32(tx->security | 2918 (flags & WPI_TX_NEED_ACK) | 2919 WPI_NEXT_STA_ID(tx->id)); 2920 } 2921 2922 tx->len = htole16(totlen); 2923 tx->flags = htole32(flags); 2924 tx->plcp = rate2plcp(rate); 2925 tx->tid = tid; 2926 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2927 tx->ofdm_mask = 0xff; 2928 tx->cck_mask = 0x0f; 2929 tx->rts_ntries = 7; 2930 tx->data_ntries = tp->maxretry; 2931 2932 tx_data.ni = ni; 2933 tx_data.m = m; 2934 tx_data.size = sizeof(struct wpi_cmd_data); 2935 tx_data.code = WPI_CMD_TX_DATA; 2936 tx_data.ac = ac; 2937 2938 return wpi_cmd2(sc, &tx_data); 2939 } 2940 2941 static int 2942 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2943 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2944 { 2945 struct ieee80211vap *vap = ni->ni_vap; 2946 struct ieee80211_key *k = NULL; 2947 struct ieee80211_frame *wh; 2948 struct wpi_buf tx_data; 2949 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2950 uint32_t flags; 2951 uint8_t ac, type, rate; 2952 int swcrypt, totlen; 2953 2954 wh = mtod(m, struct ieee80211_frame *); 2955 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2956 swcrypt = 1; 2957 2958 ac = params->ibp_pri & 3; 2959 2960 /* Choose a TX rate index. */ 2961 rate = params->ibp_rate0; 2962 2963 flags = 0; 2964 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2965 flags |= WPI_TX_AUTO_SEQ; 2966 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2967 flags |= WPI_TX_NEED_ACK; 2968 if (params->ibp_flags & IEEE80211_BPF_RTS) 2969 flags |= WPI_TX_NEED_RTS; 2970 if (params->ibp_flags & IEEE80211_BPF_CTS) 2971 flags |= WPI_TX_NEED_CTS; 2972 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2973 flags |= WPI_TX_FULL_TXOP; 2974 2975 /* Encrypt the frame if need be. */ 2976 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2977 /* Retrieve key for TX. */ 2978 k = ieee80211_crypto_encap(ni, m); 2979 if (k == NULL) 2980 return (ENOBUFS); 2981 2982 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2983 2984 /* 802.11 header may have moved. 
*/ 2985 wh = mtod(m, struct ieee80211_frame *); 2986 } 2987 totlen = m->m_pkthdr.len; 2988 2989 if (ieee80211_radiotap_active_vap(vap)) { 2990 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2991 2992 tap->wt_flags = 0; 2993 tap->wt_rate = rate; 2994 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2995 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2996 2997 ieee80211_radiotap_tx(vap, m); 2998 } 2999 3000 memset(tx, 0, sizeof (struct wpi_cmd_data)); 3001 if (type == IEEE80211_FC0_TYPE_MGT) { 3002 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3003 3004 /* Tell HW to set timestamp in probe responses. */ 3005 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3006 flags |= WPI_TX_INSERT_TSTAMP; 3007 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3008 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3009 tx->timeout = htole16(3); 3010 else 3011 tx->timeout = htole16(2); 3012 } 3013 3014 if (!swcrypt) { 3015 switch (k->wk_cipher->ic_cipher) { 3016 case IEEE80211_CIPHER_AES_CCM: 3017 tx->security = WPI_CIPHER_CCMP; 3018 break; 3019 3020 default: 3021 break; 3022 } 3023 3024 memcpy(tx->key, k->wk_key, k->wk_keylen); 3025 } 3026 3027 tx->len = htole16(totlen); 3028 tx->flags = htole32(flags); 3029 tx->plcp = rate2plcp(rate); 3030 tx->id = WPI_ID_BROADCAST; 3031 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3032 tx->rts_ntries = params->ibp_try1; 3033 tx->data_ntries = params->ibp_try0; 3034 3035 tx_data.ni = ni; 3036 tx_data.m = m; 3037 tx_data.size = sizeof(struct wpi_cmd_data); 3038 tx_data.code = WPI_CMD_TX_DATA; 3039 tx_data.ac = ac; 3040 3041 return wpi_cmd2(sc, &tx_data); 3042 } 3043 3044 static __inline int 3045 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) 3046 { 3047 struct wpi_tx_ring *ring = &sc->txq[ac]; 3048 int retval; 3049 3050 WPI_TXQ_STATE_LOCK(sc); 3051 retval = WPI_TX_RING_HIMARK - ring->queued; 3052 WPI_TXQ_STATE_UNLOCK(sc); 3053 3054 return retval; 3055 } 3056 3057 static int 3058 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3059 const struct ieee80211_bpf_params *params) 3060 { 3061 struct ieee80211com *ic = ni->ni_ic; 3062 struct wpi_softc *sc = ic->ic_softc; 3063 uint16_t ac; 3064 int error = 0; 3065 3066 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3067 3068 ac = M_WME_GETAC(m); 3069 3070 WPI_TX_LOCK(sc); 3071 3072 /* NB: no fragments here */ 3073 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { 3074 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3075 goto unlock; 3076 } 3077 3078 if (params == NULL) { 3079 /* 3080 * Legacy path; interpret frame contents to decide 3081 * precisely how to send the frame. 3082 */ 3083 error = wpi_tx_data(sc, m, ni); 3084 } else { 3085 /* 3086 * Caller supplied explicit parameters to use in 3087 * sending the frame. 3088 */ 3089 error = wpi_tx_data_raw(sc, m, ni, params); 3090 } 3091 3092 unlock: WPI_TX_UNLOCK(sc); 3093 3094 if (error != 0) { 3095 m_freem(m); 3096 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3097 3098 return error; 3099 } 3100 3101 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3102 3103 return 0; 3104 } 3105 3106 static int 3107 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3108 { 3109 struct wpi_softc *sc = ic->ic_softc; 3110 struct ieee80211_node *ni; 3111 struct mbuf *mnext; 3112 uint16_t ac; 3113 int error, nmbufs; 3114 3115 WPI_TX_LOCK(sc); 3116 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3117 3118 /* Check if interface is up & running. 
*/ 3119 if (__predict_false(sc->sc_running == 0)) { 3120 error = ENXIO; 3121 goto unlock; 3122 } 3123 3124 nmbufs = 1; 3125 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) 3126 nmbufs++; 3127 3128 /* Check for available space. */ 3129 ac = M_WME_GETAC(m); 3130 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { 3131 error = ENOBUFS; 3132 goto unlock; 3133 } 3134 3135 error = 0; 3136 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3137 do { 3138 mnext = m->m_nextpkt; 3139 if (wpi_tx_data(sc, m, ni) != 0) { 3140 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 3141 nmbufs); 3142 wpi_free_txfrags(sc, ac); 3143 ieee80211_free_mbuf(m); 3144 ieee80211_free_node(ni); 3145 break; 3146 } 3147 } while((m = mnext) != NULL); 3148 3149 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3150 3151 unlock: WPI_TX_UNLOCK(sc); 3152 3153 return (error); 3154 } 3155 3156 static void 3157 wpi_watchdog_rfkill(void *arg) 3158 { 3159 struct wpi_softc *sc = arg; 3160 struct ieee80211com *ic = &sc->sc_ic; 3161 3162 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3163 3164 /* No need to lock firmware memory. */ 3165 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3166 /* Radio kill switch is still off. */ 3167 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3168 sc); 3169 } else 3170 ieee80211_runtask(ic, &sc->sc_radioon_task); 3171 } 3172 3173 static void 3174 wpi_scan_timeout(void *arg) 3175 { 3176 struct wpi_softc *sc = arg; 3177 struct ieee80211com *ic = &sc->sc_ic; 3178 3179 ic_printf(ic, "scan timeout\n"); 3180 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3181 } 3182 3183 static void 3184 wpi_tx_timeout(void *arg) 3185 { 3186 struct wpi_softc *sc = arg; 3187 struct ieee80211com *ic = &sc->sc_ic; 3188 3189 ic_printf(ic, "device timeout\n"); 3190 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3191 } 3192 3193 static void 3194 wpi_parent(struct ieee80211com *ic) 3195 { 3196 struct wpi_softc *sc = ic->ic_softc; 3197 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3198 3199 if (ic->ic_nrunning > 0) { 3200 if (wpi_init(sc) == 0) { 3201 ieee80211_notify_radio(ic, 1); 3202 ieee80211_start_all(ic); 3203 } else { 3204 ieee80211_notify_radio(ic, 0); 3205 ieee80211_stop(vap); 3206 } 3207 } else 3208 wpi_stop(sc); 3209 } 3210 3211 /* 3212 * Send a command to the firmware. 3213 */ 3214 static int 3215 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, 3216 int async) 3217 { 3218 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3219 struct wpi_tx_desc *desc; 3220 struct wpi_tx_data *data; 3221 struct wpi_tx_cmd *cmd; 3222 struct mbuf *m; 3223 bus_addr_t paddr; 3224 uint16_t totlen; 3225 int error; 3226 3227 WPI_TXQ_LOCK(sc); 3228 3229 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3230 3231 if (__predict_false(sc->sc_running == 0)) { 3232 /* wpi_stop() was called */ 3233 if (code == WPI_CMD_SCAN) 3234 error = ENETDOWN; 3235 else 3236 error = 0; 3237 3238 goto fail; 3239 } 3240 3241 if (async == 0) 3242 WPI_LOCK_ASSERT(sc); 3243 3244 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", 3245 __func__, wpi_cmd_str(code), size, async); 3246 3247 desc = &ring->desc[ring->cur]; 3248 data = &ring->data[ring->cur]; 3249 totlen = 4 + size; 3250 3251 if (size > sizeof cmd->data) { 3252 /* Command is too large to fit in a descriptor. 
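 * Copy it into a jumbo mbuf and DMA-map that instead of using the
 * preallocated per-slot command buffer; commands larger than MCLBYTES
 * are rejected outright.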
*/ 3253 if (totlen > MCLBYTES) { 3254 error = EINVAL; 3255 goto fail; 3256 } 3257 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3258 if (m == NULL) { 3259 error = ENOMEM; 3260 goto fail; 3261 } 3262 cmd = mtod(m, struct wpi_tx_cmd *); 3263 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3264 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3265 if (error != 0) { 3266 m_freem(m); 3267 goto fail; 3268 } 3269 data->m = m; 3270 } else { 3271 cmd = &ring->cmd[ring->cur]; 3272 paddr = data->cmd_paddr; 3273 } 3274 3275 cmd->code = code; 3276 cmd->flags = 0; 3277 cmd->qid = ring->qid; 3278 cmd->idx = ring->cur; 3279 memcpy(cmd->data, buf, size); 3280 3281 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3282 desc->segs[0].addr = htole32(paddr); 3283 desc->segs[0].len = htole32(totlen); 3284 3285 if (size > sizeof cmd->data) { 3286 bus_dmamap_sync(ring->data_dmat, data->map, 3287 BUS_DMASYNC_PREWRITE); 3288 } else { 3289 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3290 BUS_DMASYNC_PREWRITE); 3291 } 3292 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3293 BUS_DMASYNC_PREWRITE); 3294 3295 /* Kick command ring. */ 3296 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3297 sc->sc_update_tx_ring(sc, ring); 3298 3299 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3300 3301 WPI_TXQ_UNLOCK(sc); 3302 3303 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3304 3305 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3306 3307 WPI_TXQ_UNLOCK(sc); 3308 3309 return error; 3310 } 3311 3312 /* 3313 * Configure HW multi-rate retries. 3314 */ 3315 static int 3316 wpi_mrr_setup(struct wpi_softc *sc) 3317 { 3318 struct ieee80211com *ic = &sc->sc_ic; 3319 struct wpi_mrr_setup mrr; 3320 uint8_t i; 3321 int error; 3322 3323 /* CCK rates (not used with 802.11a). */ 3324 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3325 mrr.rates[i].flags = 0; 3326 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3327 /* Fallback to the immediate lower CCK rate (if any.) */ 3328 mrr.rates[i].next = 3329 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3330 /* Try twice at this rate before falling back to "next". */ 3331 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3332 } 3333 /* OFDM rates (not used with 802.11b). */ 3334 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3335 mrr.rates[i].flags = 0; 3336 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3337 /* Fallback to the immediate lower rate (if any.) */ 3338 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3339 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3340 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3341 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3342 i - 1; 3343 /* Try twice at this rate before falling back to "next". */ 3344 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3345 } 3346 /* Setup MRR for control frames. */ 3347 mrr.which = htole32(WPI_MRR_CTL); 3348 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3349 if (error != 0) { 3350 device_printf(sc->sc_dev, 3351 "could not setup MRR for control frames\n"); 3352 return error; 3353 } 3354 /* Setup MRR for data frames. 
*/ 3355 mrr.which = htole32(WPI_MRR_DATA); 3356 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3357 if (error != 0) { 3358 device_printf(sc->sc_dev, 3359 "could not setup MRR for data frames\n"); 3360 return error; 3361 } 3362 return 0; 3363 } 3364 3365 static int 3366 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3367 { 3368 struct ieee80211com *ic = ni->ni_ic; 3369 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3370 struct wpi_node *wn = WPI_NODE(ni); 3371 struct wpi_node_info node; 3372 int error; 3373 3374 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3375 3376 if (wn->id == WPI_ID_UNDEFINED) 3377 return EINVAL; 3378 3379 memset(&node, 0, sizeof node); 3380 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3381 node.id = wn->id; 3382 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3383 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3384 node.action = htole32(WPI_ACTION_SET_RATE); 3385 node.antenna = WPI_ANTENNA_BOTH; 3386 3387 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3388 wn->id, ether_sprintf(ni->ni_macaddr)); 3389 3390 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3391 if (error != 0) { 3392 device_printf(sc->sc_dev, 3393 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3394 error); 3395 return error; 3396 } 3397 3398 if (wvp->wv_gtk != 0) { 3399 error = wpi_set_global_keys(ni); 3400 if (error != 0) { 3401 device_printf(sc->sc_dev, 3402 "%s: error while setting global keys\n", __func__); 3403 return ENXIO; 3404 } 3405 } 3406 3407 return 0; 3408 } 3409 3410 /* 3411 * Broadcast node is used to send group-addressed and management frames. 3412 */ 3413 static int 3414 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3415 { 3416 struct ieee80211com *ic = &sc->sc_ic; 3417 struct wpi_node_info node; 3418 3419 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3420 3421 memset(&node, 0, sizeof node); 3422 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3423 node.id = WPI_ID_BROADCAST; 3424 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3425 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3426 node.action = htole32(WPI_ACTION_SET_RATE); 3427 node.antenna = WPI_ANTENNA_BOTH; 3428 3429 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3430 3431 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3432 } 3433 3434 static int 3435 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3436 { 3437 struct wpi_node *wn = WPI_NODE(ni); 3438 int error; 3439 3440 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3441 3442 wn->id = wpi_add_node_entry_sta(sc); 3443 3444 if ((error = wpi_add_node(sc, ni)) != 0) { 3445 wpi_del_node_entry(sc, wn->id); 3446 wn->id = WPI_ID_UNDEFINED; 3447 return error; 3448 } 3449 3450 return 0; 3451 } 3452 3453 static int 3454 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3455 { 3456 struct wpi_node *wn = WPI_NODE(ni); 3457 int error; 3458 3459 KASSERT(wn->id == WPI_ID_UNDEFINED, 3460 ("the node %d was added before", wn->id)); 3461 3462 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3463 3464 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3465 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3466 return ENOMEM; 3467 } 3468 3469 if ((error = wpi_add_node(sc, ni)) != 0) { 3470 wpi_del_node_entry(sc, wn->id); 3471 wn->id = WPI_ID_UNDEFINED; 3472 return error; 3473 } 3474 3475 return 0; 3476 } 3477 3478 static void 3479 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3480 { 3481 struct wpi_node *wn = WPI_NODE(ni); 3482 struct wpi_cmd_del_node node; 3483 int error; 3484 3485 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3486 3487 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3488 3489 memset(&node, 0, sizeof node); 3490 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3491 node.count = 1; 3492 3493 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3494 wn->id, ether_sprintf(ni->ni_macaddr)); 3495 3496 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3497 if (error != 0) { 3498 device_printf(sc->sc_dev, 3499 "%s: could not delete node %u, error %d\n", __func__, 3500 wn->id, error); 3501 } 3502 } 3503 3504 static int 3505 wpi_updateedca(struct ieee80211com *ic) 3506 { 3507 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3508 struct wpi_softc *sc = ic->ic_softc; 3509 struct wpi_edca_params cmd; 3510 int aci, error; 3511 3512 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3513 3514 memset(&cmd, 0, sizeof cmd); 3515 cmd.flags = htole32(WPI_EDCA_UPDATE); 3516 for (aci = 0; aci < WME_NUM_AC; aci++) { 3517 const struct wmeParams *ac = 3518 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3519 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3520 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3521 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3522 cmd.ac[aci].txoplimit = 3523 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3524 3525 DPRINTF(sc, WPI_DEBUG_EDCA, 3526 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3527 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3528 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3529 cmd.ac[aci].txoplimit); 3530 } 3531 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3532 3533 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3534 3535 return error; 3536 #undef WPI_EXP2 3537 } 3538 3539 static void 3540 wpi_set_promisc(struct wpi_softc *sc) 3541 { 3542 struct ieee80211com *ic = &sc->sc_ic; 3543 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3544 uint32_t promisc_filter; 3545 3546 promisc_filter = WPI_FILTER_CTL; 3547 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3548 promisc_filter |= WPI_FILTER_PROMISC; 3549 3550 if (ic->ic_promisc > 0) 3551 sc->rxon.filter |= htole32(promisc_filter); 3552 else 3553 sc->rxon.filter &= ~htole32(promisc_filter); 3554 } 3555 3556 static void 3557 wpi_update_promisc(struct ieee80211com *ic) 3558 { 3559 struct wpi_softc *sc = ic->ic_softc; 3560 3561 WPI_RXON_LOCK(sc); 3562 wpi_set_promisc(sc); 3563 3564 if (wpi_send_rxon(sc, 1, 1) != 0) { 3565 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3566 __func__); 3567 } 3568 WPI_RXON_UNLOCK(sc); 3569 } 3570 3571 static void 3572 wpi_update_mcast(struct ieee80211com *ic) 3573 { 3574 /* Ignore */ 3575 } 3576 3577 static void 3578 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3579 { 3580 struct wpi_cmd_led led; 3581 3582 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3583 3584 led.which = which; 3585 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3586 led.off = off; 3587 led.on = on; 3588 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3589 } 3590 3591 static int 3592 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3593 { 3594 struct wpi_cmd_timing cmd; 3595 uint64_t val, mod; 3596 3597 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3598 3599 memset(&cmd, 0, sizeof cmd); 3600 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3601 cmd.bintval = htole16(ni->ni_intval); 3602 cmd.lintval = htole16(10); 3603 3604 /* Compute remaining time until next beacon. */ 3605 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3606 mod = le64toh(cmd.tstamp) % val; 3607 cmd.binitval = htole32((uint32_t)(val - mod)); 3608 3609 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3610 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3611 3612 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3613 } 3614 3615 /* 3616 * This function is called periodically (every 60 seconds) to adjust output 3617 * power to temperature changes. 3618 */ 3619 static void 3620 wpi_power_calibration(struct wpi_softc *sc) 3621 { 3622 int temp; 3623 3624 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3625 3626 /* Update sensor data. */ 3627 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3628 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3629 3630 /* Sanity-check read value. */ 3631 if (temp < -260 || temp > 25) { 3632 /* This can't be correct, ignore. */ 3633 DPRINTF(sc, WPI_DEBUG_TEMP, 3634 "out-of-range temperature reported: %d\n", temp); 3635 return; 3636 } 3637 3638 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3639 3640 /* Adjust Tx power if need be. */ 3641 if (abs(temp - sc->temp) <= 6) 3642 return; 3643 3644 sc->temp = temp; 3645 3646 if (wpi_set_txpower(sc, 1) != 0) { 3647 /* just warn, too bad for the automatic calibration... */ 3648 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3649 } 3650 } 3651 3652 /* 3653 * Set TX power for current channel. 3654 */ 3655 static int 3656 wpi_set_txpower(struct wpi_softc *sc, int async) 3657 { 3658 struct wpi_power_group *group; 3659 struct wpi_cmd_txpower cmd; 3660 uint8_t chan; 3661 int idx, is_chan_5ghz, i; 3662 3663 /* Retrieve current channel from last RXON. 
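 * The channel and band recorded there select the power group and gain
 * tables used below.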
*/ 3664 chan = sc->rxon.chan; 3665 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3666 3667 /* Find the TX power group to which this channel belongs. */ 3668 if (is_chan_5ghz) { 3669 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3670 if (chan <= group->chan) 3671 break; 3672 } else 3673 group = &sc->groups[0]; 3674 3675 memset(&cmd, 0, sizeof cmd); 3676 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3677 cmd.chan = htole16(chan); 3678 3679 /* Set TX power for all OFDM and CCK rates. */ 3680 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3681 /* Retrieve TX power for this channel/rate. */ 3682 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3683 3684 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3685 3686 if (is_chan_5ghz) { 3687 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3688 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3689 } else { 3690 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3691 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3692 } 3693 DPRINTF(sc, WPI_DEBUG_TEMP, 3694 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3695 } 3696 3697 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3698 } 3699 3700 /* 3701 * Determine Tx power index for a given channel/rate combination. 3702 * This takes into account the regulatory information from EEPROM and the 3703 * current temperature. 3704 */ 3705 static int 3706 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3707 uint8_t chan, int is_chan_5ghz, int ridx) 3708 { 3709 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3710 #define fdivround(a, b, n) \ 3711 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3712 3713 /* Linear interpolation. */ 3714 #define interpolate(x, x1, y1, x2, y2, n) \ 3715 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3716 3717 struct wpi_power_sample *sample; 3718 int pwr, idx; 3719 3720 /* Default TX power is group maximum TX power minus 3dB. */ 3721 pwr = group->maxpwr / 2; 3722 3723 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3724 switch (ridx) { 3725 case WPI_RIDX_OFDM36: 3726 pwr -= is_chan_5ghz ? 5 : 0; 3727 break; 3728 case WPI_RIDX_OFDM48: 3729 pwr -= is_chan_5ghz ? 10 : 7; 3730 break; 3731 case WPI_RIDX_OFDM54: 3732 pwr -= is_chan_5ghz ? 12 : 9; 3733 break; 3734 } 3735 3736 /* Never exceed the channel maximum allowed TX power. */ 3737 pwr = min(pwr, sc->maxpwr[chan]); 3738 3739 /* Retrieve TX power index into gain tables from samples. */ 3740 for (sample = group->samples; sample < &group->samples[3]; sample++) 3741 if (pwr > sample[1].power) 3742 break; 3743 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3744 idx = interpolate(pwr, sample[0].power, sample[0].index, 3745 sample[1].power, sample[1].index, 19); 3746 3747 /*- 3748 * Adjust power index based on current temperature: 3749 * - if cooler than factory-calibrated: decrease output power 3750 * - if warmer than factory-calibrated: increase output power 3751 */ 3752 idx -= (sc->temp - group->temp) * 11 / 100; 3753 3754 /* Decrease TX power for CCK rates (-5dB). */ 3755 if (ridx >= WPI_RIDX_CCK1) 3756 idx += 10; 3757 3758 /* Make sure idx stays in a valid range. */ 3759 if (idx < 0) 3760 return 0; 3761 if (idx > WPI_MAX_PWR_INDEX) 3762 return WPI_MAX_PWR_INDEX; 3763 return idx; 3764 3765 #undef interpolate 3766 #undef fdivround 3767 } 3768 3769 /* 3770 * Set STA mode power saving level (between 0 and 5). 
3771 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3772 */ 3773 static int 3774 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3775 { 3776 struct wpi_pmgt_cmd cmd; 3777 const struct wpi_pmgt *pmgt; 3778 uint32_t max, reg; 3779 uint8_t skip_dtim; 3780 int i; 3781 3782 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3783 "%s: dtim=%d, level=%d, async=%d\n", 3784 __func__, dtim, level, async); 3785 3786 /* Select which PS parameters to use. */ 3787 if (dtim <= 10) 3788 pmgt = &wpi_pmgt[0][level]; 3789 else 3790 pmgt = &wpi_pmgt[1][level]; 3791 3792 memset(&cmd, 0, sizeof cmd); 3793 if (level != 0) /* not CAM */ 3794 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3795 /* Retrieve PCIe Active State Power Management (ASPM). */ 3796 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3797 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3798 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3799 3800 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3801 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3802 3803 if (dtim == 0) { 3804 dtim = 1; 3805 skip_dtim = 0; 3806 } else 3807 skip_dtim = pmgt->skip_dtim; 3808 3809 if (skip_dtim != 0) { 3810 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3811 max = pmgt->intval[4]; 3812 if (max == (uint32_t)-1) 3813 max = dtim * (skip_dtim + 1); 3814 else if (max > dtim) 3815 max = (max / dtim) * dtim; 3816 } else 3817 max = dtim; 3818 3819 for (i = 0; i < 5; i++) 3820 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3821 3822 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3823 } 3824 3825 static int 3826 wpi_send_btcoex(struct wpi_softc *sc) 3827 { 3828 struct wpi_bluetooth cmd; 3829 3830 memset(&cmd, 0, sizeof cmd); 3831 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3832 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3833 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3834 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3835 __func__); 3836 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3837 } 3838 3839 static int 3840 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3841 { 3842 int error; 3843 3844 if (async) 3845 WPI_RXON_LOCK_ASSERT(sc); 3846 3847 if (assoc && wpi_check_bss_filter(sc) != 0) { 3848 struct wpi_assoc rxon_assoc; 3849 3850 rxon_assoc.flags = sc->rxon.flags; 3851 rxon_assoc.filter = sc->rxon.filter; 3852 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3853 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3854 rxon_assoc.reserved = 0; 3855 3856 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3857 sizeof (struct wpi_assoc), async); 3858 if (error != 0) { 3859 device_printf(sc->sc_dev, 3860 "RXON_ASSOC command failed, error %d\n", error); 3861 return error; 3862 } 3863 } else { 3864 if (async) { 3865 WPI_NT_LOCK(sc); 3866 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3867 sizeof (struct wpi_rxon), async); 3868 if (error == 0) 3869 wpi_clear_node_table(sc); 3870 WPI_NT_UNLOCK(sc); 3871 } else { 3872 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3873 sizeof (struct wpi_rxon), async); 3874 if (error == 0) 3875 wpi_clear_node_table(sc); 3876 } 3877 3878 if (error != 0) { 3879 device_printf(sc->sc_dev, 3880 "RXON command failed, error %d\n", error); 3881 return error; 3882 } 3883 3884 /* Add broadcast node. */ 3885 error = wpi_add_broadcast_node(sc, async); 3886 if (error != 0) { 3887 device_printf(sc->sc_dev, 3888 "could not add broadcast node, error %d\n", error); 3889 return error; 3890 } 3891 } 3892 3893 /* Configuration has changed, set Tx power accordingly. 
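 * The new RXON may have switched channel or band, so the TX power
 * tables for the current channel must be sent again.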
*/ 3894 if ((error = wpi_set_txpower(sc, async)) != 0) { 3895 device_printf(sc->sc_dev, 3896 "%s: could not set TX power, error %d\n", __func__, error); 3897 return error; 3898 } 3899 3900 return 0; 3901 } 3902 3903 /** 3904 * Configure the card to listen to a particular channel; this transitions the 3905 * card into being able to receive frames from remote devices. 3906 */ 3907 static int 3908 wpi_config(struct wpi_softc *sc) 3909 { 3910 struct ieee80211com *ic = &sc->sc_ic; 3911 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3912 struct ieee80211_channel *c = ic->ic_curchan; 3913 int error; 3914 3915 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3916 3917 /* Set power saving level to CAM during initialization. */ 3918 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3919 device_printf(sc->sc_dev, 3920 "%s: could not set power saving level\n", __func__); 3921 return error; 3922 } 3923 3924 /* Configure bluetooth coexistence. */ 3925 if ((error = wpi_send_btcoex(sc)) != 0) { 3926 device_printf(sc->sc_dev, 3927 "could not configure bluetooth coexistence\n"); 3928 return error; 3929 } 3930 3931 /* Configure adapter. */ 3932 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3933 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3934 3935 /* Set default channel. */ 3936 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3937 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3938 if (IEEE80211_IS_CHAN_2GHZ(c)) 3939 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3940 3941 sc->rxon.filter = WPI_FILTER_MULTICAST; 3942 switch (ic->ic_opmode) { 3943 case IEEE80211_M_STA: 3944 sc->rxon.mode = WPI_MODE_STA; 3945 break; 3946 case IEEE80211_M_IBSS: 3947 sc->rxon.mode = WPI_MODE_IBSS; 3948 sc->rxon.filter |= WPI_FILTER_BEACON; 3949 break; 3950 case IEEE80211_M_HOSTAP: 3951 /* XXX workaround for beaconing */ 3952 sc->rxon.mode = WPI_MODE_IBSS; 3953 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3954 break; 3955 case IEEE80211_M_AHDEMO: 3956 sc->rxon.mode = WPI_MODE_HOSTAP; 3957 break; 3958 case IEEE80211_M_MONITOR: 3959 sc->rxon.mode = WPI_MODE_MONITOR; 3960 break; 3961 default: 3962 device_printf(sc->sc_dev, "unknown opmode %d\n", 3963 ic->ic_opmode); 3964 return EINVAL; 3965 } 3966 sc->rxon.filter = htole32(sc->rxon.filter); 3967 wpi_set_promisc(sc); 3968 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3969 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3970 3971 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3972 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3973 __func__); 3974 return error; 3975 } 3976 3977 /* Set up rate scaling. */ 3978 if ((error = wpi_mrr_setup(sc)) != 0) { 3979 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3980 error); 3981 return error; 3982 } 3983 3984 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3985 3986 return 0; 3987 } 3988 3989 static uint16_t 3990 wpi_get_active_dwell_time(struct wpi_softc *sc, 3991 struct ieee80211_channel *c, uint8_t n_probes) 3992 { 3993 /* No channel? Default to 2GHz settings. */ 3994 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3995 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3996 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3997 } 3998 3999 /* 5GHz dwell time. */ 4000 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4001 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4002 } 4003 4004 /* 4005 * Limit the total dwell time. 4006 * 4007 * Returns the dwell time in milliseconds.
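 * When an association exists, the dwell is clamped so that one channel
 * visit fits inside a single beacon interval; e.g. with bintval = 100
 * and, hypothetically, WPI_CHANNEL_TUNE_TIME = 5, a requested dwell of
 * 120 would be clamped to 100 - 2 * 5 = 90.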
4008 */ 4009 static uint16_t 4010 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4011 { 4012 struct ieee80211com *ic = &sc->sc_ic; 4013 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4014 uint16_t bintval = 0; 4015 4016 /* bintval is in TU (1.024mS) */ 4017 if (vap != NULL) 4018 bintval = vap->iv_bss->ni_intval; 4019 4020 /* 4021 * If it's non-zero, we should calculate the minimum of 4022 * it and the DWELL_BASE. 4023 * 4024 * XXX Yes, the math should take into account that bintval 4025 * is 1.024mS, not 1mS.. 4026 */ 4027 if (bintval > 0) { 4028 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4029 bintval); 4030 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4031 } 4032 4033 /* No association context? Default. */ 4034 return dwell_time; 4035 } 4036 4037 static uint16_t 4038 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4039 { 4040 uint16_t passive; 4041 4042 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4043 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4044 else 4045 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4046 4047 /* Clamp to the beacon interval if we're associated. */ 4048 return (wpi_limit_dwell(sc, passive)); 4049 } 4050 4051 static uint32_t 4052 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4053 { 4054 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4055 uint32_t nbeacons = time / bintval; 4056 4057 if (mod > WPI_PAUSE_MAX_TIME) 4058 mod = WPI_PAUSE_MAX_TIME; 4059 4060 return WPI_PAUSE_SCAN(nbeacons, mod); 4061 } 4062 4063 /* 4064 * Send a scan request to the firmware. 4065 */ 4066 static int 4067 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4068 { 4069 struct ieee80211com *ic = &sc->sc_ic; 4070 struct ieee80211_scan_state *ss = ic->ic_scan; 4071 struct ieee80211vap *vap = ss->ss_vap; 4072 struct wpi_scan_hdr *hdr; 4073 struct wpi_cmd_data *tx; 4074 struct wpi_scan_essid *essids; 4075 struct wpi_scan_chan *chan; 4076 struct ieee80211_frame *wh; 4077 struct ieee80211_rateset *rs; 4078 uint16_t bintval, buflen, dwell_active, dwell_passive; 4079 uint8_t *buf, *frm, i, nssid; 4080 int bgscan, error; 4081 4082 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4083 4084 /* 4085 * We are absolutely not allowed to send a scan command when another 4086 * scan command is pending. 4087 */ 4088 if (callout_pending(&sc->scan_timeout)) { 4089 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4090 __func__); 4091 error = EAGAIN; 4092 goto fail; 4093 } 4094 4095 bgscan = wpi_check_bss_filter(sc); 4096 bintval = vap->iv_bss->ni_intval; 4097 if (bgscan != 0 && 4098 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4099 error = EOPNOTSUPP; 4100 goto fail; 4101 } 4102 4103 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4104 if (buf == NULL) { 4105 device_printf(sc->sc_dev, 4106 "%s: could not allocate buffer for scan command\n", 4107 __func__); 4108 error = ENOMEM; 4109 goto fail; 4110 } 4111 hdr = (struct wpi_scan_hdr *)buf; 4112 4113 /* 4114 * Move to the next channel if no packets are received within 10 msecs 4115 * after sending the probe request. 4116 */ 4117 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4118 hdr->quiet_threshold = htole16(1); 4119 4120 if (bgscan != 0) { 4121 /* 4122 * Max needs to be greater than active and passive and quiet! 4123 * It's also in microseconds! 
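 * max_svc presumably bounds how long the firmware may stay away from
 * the BSS channel, while pause_svc encodes how long it sits back on it
 * between scanned channels. As an example of the arithmetic below, with
 * a beacon interval of 100 TU wpi_get_scan_pause_time(100, 100) gives
 * nbeacons = 1 and mod = 0, i.e. WPI_PAUSE_SCAN(1, 0).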
4124 */ 4125 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4126 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4127 bintval)); 4128 } 4129 4130 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4131 4132 tx = (struct wpi_cmd_data *)(hdr + 1); 4133 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4134 tx->id = WPI_ID_BROADCAST; 4135 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4136 4137 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4138 /* Send probe requests at 6Mbps. */ 4139 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4140 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4141 } else { 4142 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4143 /* Send probe requests at 1Mbps. */ 4144 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4145 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4146 } 4147 4148 essids = (struct wpi_scan_essid *)(tx + 1); 4149 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4150 for (i = 0; i < nssid; i++) { 4151 essids[i].id = IEEE80211_ELEMID_SSID; 4152 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4153 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4154 #ifdef WPI_DEBUG 4155 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4156 printf("Scanning Essid: "); 4157 ieee80211_print_essid(essids[i].data, essids[i].len); 4158 printf("\n"); 4159 } 4160 #endif 4161 } 4162 4163 /* 4164 * Build a probe request frame. Most of the following code is a 4165 * copy & paste of what is done in net80211. 4166 */ 4167 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4168 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4169 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4170 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4171 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4172 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4173 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4174 4175 frm = (uint8_t *)(wh + 1); 4176 frm = ieee80211_add_ssid(frm, NULL, 0); 4177 frm = ieee80211_add_rates(frm, rs); 4178 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4179 frm = ieee80211_add_xrates(frm, rs); 4180 4181 /* Set length of probe request. */ 4182 tx->len = htole16(frm - (uint8_t *)wh); 4183 4184 /* 4185 * Construct information about the channel that we 4186 * want to scan. The firmware expects this to be directly 4187 * after the scan probe request 4188 */ 4189 chan = (struct wpi_scan_chan *)frm; 4190 chan->chan = ieee80211_chan2ieee(ic, c); 4191 chan->flags = 0; 4192 if (nssid) { 4193 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4194 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4195 } else 4196 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4197 4198 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4199 chan->flags |= WPI_CHAN_ACTIVE; 4200 4201 /* 4202 * Calculate the active/passive dwell times. 4203 */ 4204 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4205 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4206 4207 /* Make sure they're valid. */ 4208 if (dwell_active > dwell_passive) 4209 dwell_active = dwell_passive; 4210 4211 chan->active = htole16(dwell_active); 4212 chan->passive = htole16(dwell_passive); 4213 4214 chan->dsp_gain = 0x6e; /* Default level */ 4215 4216 if (IEEE80211_IS_CHAN_5GHZ(c)) 4217 chan->rf_gain = 0x3b; 4218 else 4219 chan->rf_gain = 0x28; 4220 4221 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4222 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4223 4224 hdr->nchan++; 4225 4226 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4227 /* XXX Force probe request transmission. 
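 * When the only channel being scanned is the one the RXON is already
 * tuned to, the channel entry is duplicated below; the copy gets no
 * flags (passive, no probe requests) and only the quiet_time dwell, so
 * the extra entry adds as little delay as possible.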
*/ 4228 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4229 4230 chan++; 4231 4232 /* Reduce unnecessary delay. */ 4233 chan->flags = 0; 4234 chan->passive = chan->active = hdr->quiet_time; 4235 4236 hdr->nchan++; 4237 } 4238 4239 chan++; 4240 4241 buflen = (uint8_t *)chan - buf; 4242 hdr->len = htole16(buflen); 4243 4244 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4245 hdr->nchan); 4246 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4247 free(buf, M_DEVBUF); 4248 4249 if (error != 0) 4250 goto fail; 4251 4252 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4253 4254 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4255 4256 return 0; 4257 4258 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4259 4260 return error; 4261 } 4262 4263 static int 4264 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4265 { 4266 struct ieee80211com *ic = vap->iv_ic; 4267 struct ieee80211_node *ni = vap->iv_bss; 4268 struct ieee80211_channel *c = ni->ni_chan; 4269 int error; 4270 4271 WPI_RXON_LOCK(sc); 4272 4273 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4274 4275 /* Update adapter configuration. */ 4276 sc->rxon.associd = 0; 4277 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4278 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4279 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4280 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4281 if (IEEE80211_IS_CHAN_2GHZ(c)) 4282 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4283 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4284 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4285 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4286 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4287 if (IEEE80211_IS_CHAN_A(c)) { 4288 sc->rxon.cck_mask = 0; 4289 sc->rxon.ofdm_mask = 0x15; 4290 } else if (IEEE80211_IS_CHAN_B(c)) { 4291 sc->rxon.cck_mask = 0x03; 4292 sc->rxon.ofdm_mask = 0; 4293 } else { 4294 /* Assume 802.11b/g. */ 4295 sc->rxon.cck_mask = 0x0f; 4296 sc->rxon.ofdm_mask = 0x15; 4297 } 4298 4299 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4300 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4301 sc->rxon.ofdm_mask); 4302 4303 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4304 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4305 __func__); 4306 } 4307 4308 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4309 4310 WPI_RXON_UNLOCK(sc); 4311 4312 return error; 4313 } 4314 4315 static int 4316 wpi_config_beacon(struct wpi_vap *wvp) 4317 { 4318 struct ieee80211vap *vap = &wvp->wv_vap; 4319 struct ieee80211com *ic = vap->iv_ic; 4320 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4321 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4322 struct wpi_softc *sc = ic->ic_softc; 4323 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4324 struct ieee80211_tim_ie *tie; 4325 struct mbuf *m; 4326 uint8_t *ptr; 4327 int error; 4328 4329 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4330 4331 WPI_VAP_LOCK_ASSERT(wvp); 4332 4333 cmd->len = htole16(bcn->m->m_pkthdr.len); 4334 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4335 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4336 4337 /* XXX seems to be unused */ 4338 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4339 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4340 ptr = mtod(bcn->m, uint8_t *); 4341 4342 cmd->tim = htole16(bo->bo_tim - ptr); 4343 cmd->timsz = tie->tim_len; 4344 } 4345 4346 /* Necessary for recursion in ieee80211_beacon_update(). 
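 * The frame queued via wpi_cmd2() below is an m_dup() copy of bcn->m;
 * the original mbuf is put back at the 'end' label so that later beacon
 * updates (which may re-enter this function) still have it.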
*/ 4347 m = bcn->m; 4348 bcn->m = m_dup(m, M_NOWAIT); 4349 if (bcn->m == NULL) { 4350 device_printf(sc->sc_dev, 4351 "%s: could not copy beacon frame\n", __func__); 4352 error = ENOMEM; 4353 goto end; 4354 } 4355 4356 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4357 device_printf(sc->sc_dev, 4358 "%s: could not update beacon frame, error %d", __func__, 4359 error); 4360 m_freem(bcn->m); 4361 } 4362 4363 /* Restore mbuf. */ 4364 end: bcn->m = m; 4365 4366 return error; 4367 } 4368 4369 static int 4370 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4371 { 4372 struct ieee80211vap *vap = ni->ni_vap; 4373 struct wpi_vap *wvp = WPI_VAP(vap); 4374 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4375 struct mbuf *m; 4376 int error; 4377 4378 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4379 4380 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4381 return EINVAL; 4382 4383 m = ieee80211_beacon_alloc(ni); 4384 if (m == NULL) { 4385 device_printf(sc->sc_dev, 4386 "%s: could not allocate beacon frame\n", __func__); 4387 return ENOMEM; 4388 } 4389 4390 WPI_VAP_LOCK(wvp); 4391 if (bcn->m != NULL) 4392 m_freem(bcn->m); 4393 4394 bcn->m = m; 4395 4396 error = wpi_config_beacon(wvp); 4397 WPI_VAP_UNLOCK(wvp); 4398 4399 return error; 4400 } 4401 4402 static void 4403 wpi_update_beacon(struct ieee80211vap *vap, int item) 4404 { 4405 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4406 struct wpi_vap *wvp = WPI_VAP(vap); 4407 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4408 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4409 struct ieee80211_node *ni = vap->iv_bss; 4410 int mcast = 0; 4411 4412 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4413 4414 WPI_VAP_LOCK(wvp); 4415 if (bcn->m == NULL) { 4416 bcn->m = ieee80211_beacon_alloc(ni); 4417 if (bcn->m == NULL) { 4418 device_printf(sc->sc_dev, 4419 "%s: could not allocate beacon frame\n", __func__); 4420 4421 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4422 __func__); 4423 4424 WPI_VAP_UNLOCK(wvp); 4425 return; 4426 } 4427 } 4428 WPI_VAP_UNLOCK(wvp); 4429 4430 if (item == IEEE80211_BEACON_TIM) 4431 mcast = 1; /* TODO */ 4432 4433 setbit(bo->bo_flags, item); 4434 ieee80211_beacon_update(ni, bcn->m, mcast); 4435 4436 WPI_VAP_LOCK(wvp); 4437 wpi_config_beacon(wvp); 4438 WPI_VAP_UNLOCK(wvp); 4439 4440 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4441 } 4442 4443 static void 4444 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4445 { 4446 struct ieee80211vap *vap = ni->ni_vap; 4447 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4448 struct wpi_node *wn = WPI_NODE(ni); 4449 int error; 4450 4451 WPI_NT_LOCK(sc); 4452 4453 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4454 4455 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4456 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4457 device_printf(sc->sc_dev, 4458 "%s: could not add IBSS node, error %d\n", 4459 __func__, error); 4460 } 4461 } 4462 WPI_NT_UNLOCK(sc); 4463 } 4464 4465 static int 4466 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4467 { 4468 struct ieee80211com *ic = vap->iv_ic; 4469 struct ieee80211_node *ni = vap->iv_bss; 4470 struct ieee80211_channel *c = ni->ni_chan; 4471 int error; 4472 4473 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4474 4475 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4476 /* Link LED blinks while monitoring. 
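 * Judging from the "always on" call further down that passes (0, 1),
 * the last two wpi_set_led() arguments appear to be the off and on
 * intervals, so (5, 5) yields an even blink.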
*/ 4477 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4478 return 0; 4479 } 4480 4481 /* XXX kernel panic workaround */ 4482 if (c == IEEE80211_CHAN_ANYC) { 4483 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4484 __func__); 4485 return EINVAL; 4486 } 4487 4488 if ((error = wpi_set_timing(sc, ni)) != 0) { 4489 device_printf(sc->sc_dev, 4490 "%s: could not set timing, error %d\n", __func__, error); 4491 return error; 4492 } 4493 4494 /* Update adapter configuration. */ 4495 WPI_RXON_LOCK(sc); 4496 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4497 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4498 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4499 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4500 if (IEEE80211_IS_CHAN_2GHZ(c)) 4501 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4502 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4503 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4504 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4505 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4506 if (IEEE80211_IS_CHAN_A(c)) { 4507 sc->rxon.cck_mask = 0; 4508 sc->rxon.ofdm_mask = 0x15; 4509 } else if (IEEE80211_IS_CHAN_B(c)) { 4510 sc->rxon.cck_mask = 0x03; 4511 sc->rxon.ofdm_mask = 0; 4512 } else { 4513 /* Assume 802.11b/g. */ 4514 sc->rxon.cck_mask = 0x0f; 4515 sc->rxon.ofdm_mask = 0x15; 4516 } 4517 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4518 4519 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4520 sc->rxon.chan, sc->rxon.flags); 4521 4522 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4523 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4524 __func__); 4525 return error; 4526 } 4527 4528 /* Start periodic calibration timer. */ 4529 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4530 4531 WPI_RXON_UNLOCK(sc); 4532 4533 if (vap->iv_opmode == IEEE80211_M_IBSS || 4534 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4535 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4536 device_printf(sc->sc_dev, 4537 "%s: could not setup beacon, error %d\n", __func__, 4538 error); 4539 return error; 4540 } 4541 } 4542 4543 if (vap->iv_opmode == IEEE80211_M_STA) { 4544 /* Add BSS node. */ 4545 WPI_NT_LOCK(sc); 4546 error = wpi_add_sta_node(sc, ni); 4547 WPI_NT_UNLOCK(sc); 4548 if (error != 0) { 4549 device_printf(sc->sc_dev, 4550 "%s: could not add BSS node, error %d\n", __func__, 4551 error); 4552 return error; 4553 } 4554 } 4555 4556 /* Link LED always on while associated. */ 4557 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4558 4559 /* Enable power-saving mode if requested by user. 
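 * Level 3 (out of 0-5) is used as the default here; dtim is passed as 0,
 * which wpi_set_pslevel() treats as 1 with DTIM skipping disabled. IBSS
 * is excluded, presumably because an IBSS station must stay awake to
 * beacon.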
*/ 4560 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4561 vap->iv_opmode != IEEE80211_M_IBSS) 4562 (void)wpi_set_pslevel(sc, 0, 3, 1); 4563 4564 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4565 4566 return 0; 4567 } 4568 4569 static int 4570 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4571 { 4572 const struct ieee80211_cipher *cip = k->wk_cipher; 4573 struct ieee80211vap *vap = ni->ni_vap; 4574 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4575 struct wpi_node *wn = WPI_NODE(ni); 4576 struct wpi_node_info node; 4577 uint16_t kflags; 4578 int error; 4579 4580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4581 4582 if (wpi_check_node_entry(sc, wn->id) == 0) { 4583 device_printf(sc->sc_dev, "%s: node does not exist\n", 4584 __func__); 4585 return 0; 4586 } 4587 4588 switch (cip->ic_cipher) { 4589 case IEEE80211_CIPHER_AES_CCM: 4590 kflags = WPI_KFLAG_CCMP; 4591 break; 4592 4593 default: 4594 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4595 cip->ic_cipher); 4596 return 0; 4597 } 4598 4599 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4600 if (k->wk_flags & IEEE80211_KEY_GROUP) 4601 kflags |= WPI_KFLAG_MULTICAST; 4602 4603 memset(&node, 0, sizeof node); 4604 node.id = wn->id; 4605 node.control = WPI_NODE_UPDATE; 4606 node.flags = WPI_FLAG_KEY_SET; 4607 node.kflags = htole16(kflags); 4608 memcpy(node.key, k->wk_key, k->wk_keylen); 4609 again: 4610 DPRINTF(sc, WPI_DEBUG_KEY, 4611 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4612 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4613 node.id, ether_sprintf(ni->ni_macaddr)); 4614 4615 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4616 if (error != 0) { 4617 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4618 error); 4619 return !error; 4620 } 4621 4622 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4623 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4624 kflags |= WPI_KFLAG_MULTICAST; 4625 node.kflags = htole16(kflags); 4626 4627 goto again; 4628 } 4629 4630 return 1; 4631 } 4632 4633 static void 4634 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4635 { 4636 const struct ieee80211_key *k = arg; 4637 struct ieee80211vap *vap = ni->ni_vap; 4638 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4639 struct wpi_node *wn = WPI_NODE(ni); 4640 int error; 4641 4642 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4643 return; 4644 4645 WPI_NT_LOCK(sc); 4646 error = wpi_load_key(ni, k); 4647 WPI_NT_UNLOCK(sc); 4648 4649 if (error == 0) { 4650 device_printf(sc->sc_dev, "%s: error while setting key\n", 4651 __func__); 4652 } 4653 } 4654 4655 static int 4656 wpi_set_global_keys(struct ieee80211_node *ni) 4657 { 4658 struct ieee80211vap *vap = ni->ni_vap; 4659 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4660 int error = 1; 4661 4662 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4663 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4664 error = wpi_load_key(ni, wk); 4665 4666 return !error; 4667 } 4668 4669 static int 4670 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4671 { 4672 struct ieee80211vap *vap = ni->ni_vap; 4673 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4674 struct wpi_node *wn = WPI_NODE(ni); 4675 struct wpi_node_info node; 4676 uint16_t kflags; 4677 int error; 4678 4679 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4680 4681 if (wpi_check_node_entry(sc, wn->id) == 0) { 4682 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4683 return 1; /* 
Nothing to do. */ 4684 } 4685 4686 kflags = WPI_KFLAG_KID(k->wk_keyix); 4687 if (k->wk_flags & IEEE80211_KEY_GROUP) 4688 kflags |= WPI_KFLAG_MULTICAST; 4689 4690 memset(&node, 0, sizeof node); 4691 node.id = wn->id; 4692 node.control = WPI_NODE_UPDATE; 4693 node.flags = WPI_FLAG_KEY_SET; 4694 node.kflags = htole16(kflags); 4695 again: 4696 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4697 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4698 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4699 4700 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4701 if (error != 0) { 4702 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4703 error); 4704 return !error; 4705 } 4706 4707 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4708 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4709 kflags |= WPI_KFLAG_MULTICAST; 4710 node.kflags = htole16(kflags); 4711 4712 goto again; 4713 } 4714 4715 return 1; 4716 } 4717 4718 static void 4719 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4720 { 4721 const struct ieee80211_key *k = arg; 4722 struct ieee80211vap *vap = ni->ni_vap; 4723 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4724 struct wpi_node *wn = WPI_NODE(ni); 4725 int error; 4726 4727 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4728 return; 4729 4730 WPI_NT_LOCK(sc); 4731 error = wpi_del_key(ni, k); 4732 WPI_NT_UNLOCK(sc); 4733 4734 if (error == 0) { 4735 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4736 __func__); 4737 } 4738 } 4739 4740 static int 4741 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4742 int set) 4743 { 4744 struct ieee80211com *ic = vap->iv_ic; 4745 struct wpi_softc *sc = ic->ic_softc; 4746 struct wpi_vap *wvp = WPI_VAP(vap); 4747 struct ieee80211_node *ni; 4748 int error, ni_ref = 0; 4749 4750 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4751 4752 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4753 /* Not for us. */ 4754 return 1; 4755 } 4756 4757 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4758 /* XMIT keys are handled in wpi_tx_data(). */ 4759 return 1; 4760 } 4761 4762 /* Handle group keys. */ 4763 if (&vap->iv_nw_keys[0] <= k && 4764 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4765 WPI_NT_LOCK(sc); 4766 if (set) 4767 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4768 else 4769 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4770 WPI_NT_UNLOCK(sc); 4771 4772 if (vap->iv_state == IEEE80211_S_RUN) { 4773 ieee80211_iterate_nodes(&ic->ic_sta, 4774 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4775 __DECONST(void *, k)); 4776 } 4777 4778 return 1; 4779 } 4780 4781 switch (vap->iv_opmode) { 4782 case IEEE80211_M_STA: 4783 ni = vap->iv_bss; 4784 break; 4785 4786 case IEEE80211_M_IBSS: 4787 case IEEE80211_M_AHDEMO: 4788 case IEEE80211_M_HOSTAP: 4789 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4790 if (ni == NULL) 4791 return 0; /* should not happen */ 4792 4793 ni_ref = 1; 4794 break; 4795 4796 default: 4797 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4798 vap->iv_opmode); 4799 return 0; 4800 } 4801 4802 WPI_NT_LOCK(sc); 4803 if (set) 4804 error = wpi_load_key(ni, k); 4805 else 4806 error = wpi_del_key(ni, k); 4807 WPI_NT_UNLOCK(sc); 4808 4809 if (ni_ref) 4810 ieee80211_node_decref(ni); 4811 4812 return error; 4813 } 4814 4815 static int 4816 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4817 { 4818 return wpi_process_key(vap, k, 1); 4819 } 4820 4821 static int 4822 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4823 { 4824 return wpi_process_key(vap, k, 0); 4825 } 4826 4827 /* 4828 * This function is called after the runtime firmware notifies us of its 4829 * readiness (called in a process context). 4830 */ 4831 static int 4832 wpi_post_alive(struct wpi_softc *sc) 4833 { 4834 int ntries, error; 4835 4836 /* Check (again) that the radio is not disabled. */ 4837 if ((error = wpi_nic_lock(sc)) != 0) 4838 return error; 4839 4840 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4841 4842 /* NB: Runtime firmware must be up and running. */ 4843 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4844 device_printf(sc->sc_dev, 4845 "RF switch: radio disabled (%s)\n", __func__); 4846 wpi_nic_unlock(sc); 4847 return EPERM; /* :-) */ 4848 } 4849 wpi_nic_unlock(sc); 4850 4851 /* Wait for thermal sensor to calibrate. */ 4852 for (ntries = 0; ntries < 1000; ntries++) { 4853 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4854 break; 4855 DELAY(10); 4856 } 4857 4858 if (ntries == 1000) { 4859 device_printf(sc->sc_dev, 4860 "timeout waiting for thermal sensor calibration\n"); 4861 return ETIMEDOUT; 4862 } 4863 4864 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4865 return 0; 4866 } 4867 4868 /* 4869 * The firmware boot code is small and is intended to be copied directly into 4870 * the NIC internal memory (no DMA transfer). 4871 */ 4872 static int 4873 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) 4874 { 4875 int error, ntries; 4876 4877 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4878 4879 size /= sizeof (uint32_t); 4880 4881 if ((error = wpi_nic_lock(sc)) != 0) 4882 return error; 4883 4884 /* Copy microcode image into NIC memory. */ 4885 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4886 (const uint32_t *)ucode, size); 4887 4888 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4889 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4890 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4891 4892 /* Start boot load now. */ 4893 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4894 4895 /* Wait for transfer to complete. */ 4896 for (ntries = 0; ntries < 1000; ntries++) { 4897 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4898 DPRINTF(sc, WPI_DEBUG_HW, 4899 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4900 WPI_FH_TX_STATUS_IDLE(6), 4901 status & WPI_FH_TX_STATUS_IDLE(6)); 4902 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4903 DPRINTF(sc, WPI_DEBUG_HW, 4904 "Status Match! 
- ntries = %d\n", ntries); 4905 break; 4906 } 4907 DELAY(10); 4908 } 4909 if (ntries == 1000) { 4910 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4911 __func__); 4912 wpi_nic_unlock(sc); 4913 return ETIMEDOUT; 4914 } 4915 4916 /* Enable boot after power up. */ 4917 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4918 4919 wpi_nic_unlock(sc); 4920 return 0; 4921 } 4922 4923 static int 4924 wpi_load_firmware(struct wpi_softc *sc) 4925 { 4926 struct wpi_fw_info *fw = &sc->fw; 4927 struct wpi_dma_info *dma = &sc->fw_dma; 4928 int error; 4929 4930 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4931 4932 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4933 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4934 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4935 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4936 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4937 4938 /* Tell adapter where to find initialization sections. */ 4939 if ((error = wpi_nic_lock(sc)) != 0) 4940 return error; 4941 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4942 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4943 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4944 dma->paddr + WPI_FW_DATA_MAXSZ); 4945 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4946 wpi_nic_unlock(sc); 4947 4948 /* Load firmware boot code. */ 4949 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4950 if (error != 0) { 4951 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4952 __func__); 4953 return error; 4954 } 4955 4956 /* Now press "execute". */ 4957 WPI_WRITE(sc, WPI_RESET, 0); 4958 4959 /* Wait at most one second for first alive notification. */ 4960 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4961 device_printf(sc->sc_dev, 4962 "%s: timeout waiting for adapter to initialize, error %d\n", 4963 __func__, error); 4964 return error; 4965 } 4966 4967 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4968 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4969 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4970 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4971 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4972 4973 /* Tell adapter where to find runtime sections. 
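 * The same BSM DRAM registers used for the init image above are
 * reprogrammed, except that WPI_FW_UPDATED is or'ed into the text size,
 * presumably to tell the bootstrap state machine to switch to the new
 * (runtime) image.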
*/ 4974 if ((error = wpi_nic_lock(sc)) != 0) 4975 return error; 4976 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4977 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4978 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4979 dma->paddr + WPI_FW_DATA_MAXSZ); 4980 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4981 WPI_FW_UPDATED | fw->main.textsz); 4982 wpi_nic_unlock(sc); 4983 4984 return 0; 4985 } 4986 4987 static int 4988 wpi_read_firmware(struct wpi_softc *sc) 4989 { 4990 const struct firmware *fp; 4991 struct wpi_fw_info *fw = &sc->fw; 4992 const struct wpi_firmware_hdr *hdr; 4993 int error; 4994 4995 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4996 4997 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4998 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 4999 5000 WPI_UNLOCK(sc); 5001 fp = firmware_get(WPI_FW_NAME); 5002 WPI_LOCK(sc); 5003 5004 if (fp == NULL) { 5005 device_printf(sc->sc_dev, 5006 "could not load firmware image '%s'\n", WPI_FW_NAME); 5007 return EINVAL; 5008 } 5009 5010 sc->fw_fp = fp; 5011 5012 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5013 device_printf(sc->sc_dev, 5014 "firmware file too short: %zu bytes\n", fp->datasize); 5015 error = EINVAL; 5016 goto fail; 5017 } 5018 5019 fw->size = fp->datasize; 5020 fw->data = (const uint8_t *)fp->data; 5021 5022 /* Extract firmware header information. */ 5023 hdr = (const struct wpi_firmware_hdr *)fw->data; 5024 5025 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5026 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5027 5028 fw->main.textsz = le32toh(hdr->rtextsz); 5029 fw->main.datasz = le32toh(hdr->rdatasz); 5030 fw->init.textsz = le32toh(hdr->itextsz); 5031 fw->init.datasz = le32toh(hdr->idatasz); 5032 fw->boot.textsz = le32toh(hdr->btextsz); 5033 fw->boot.datasz = 0; 5034 5035 /* Sanity-check firmware header. */ 5036 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5037 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5038 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5039 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5040 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5041 (fw->boot.textsz & 3) != 0) { 5042 device_printf(sc->sc_dev, "invalid firmware header\n"); 5043 error = EINVAL; 5044 goto fail; 5045 } 5046 5047 /* Check that all firmware sections fit. */ 5048 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5049 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5050 device_printf(sc->sc_dev, 5051 "firmware file too short: %zu bytes\n", fw->size); 5052 error = EINVAL; 5053 goto fail; 5054 } 5055 5056 /* Get pointers to firmware sections. 
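 * The sections are laid out back to back right after the header, in the
 * order shown in the diagram above: runtime text, runtime data, init
 * text, init data, boot text.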
*/ 5057 fw->main.text = (const uint8_t *)(hdr + 1); 5058 fw->main.data = fw->main.text + fw->main.textsz; 5059 fw->init.text = fw->main.data + fw->main.datasz; 5060 fw->init.data = fw->init.text + fw->init.textsz; 5061 fw->boot.text = fw->init.data + fw->init.datasz; 5062 5063 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5064 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5065 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5066 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5067 fw->main.textsz, fw->main.datasz, 5068 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5069 5070 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5071 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5072 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5073 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5074 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5075 5076 return 0; 5077 5078 fail: wpi_unload_firmware(sc); 5079 return error; 5080 } 5081 5082 /** 5083 * Free the referenced firmware image 5084 */ 5085 static void 5086 wpi_unload_firmware(struct wpi_softc *sc) 5087 { 5088 if (sc->fw_fp != NULL) { 5089 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5090 sc->fw_fp = NULL; 5091 } 5092 } 5093 5094 static int 5095 wpi_clock_wait(struct wpi_softc *sc) 5096 { 5097 int ntries; 5098 5099 /* Set "initialization complete" bit. */ 5100 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5101 5102 /* Wait for clock stabilization. */ 5103 for (ntries = 0; ntries < 2500; ntries++) { 5104 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5105 return 0; 5106 DELAY(100); 5107 } 5108 device_printf(sc->sc_dev, 5109 "%s: timeout waiting for clock stabilization\n", __func__); 5110 5111 return ETIMEDOUT; 5112 } 5113 5114 static int 5115 wpi_apm_init(struct wpi_softc *sc) 5116 { 5117 uint32_t reg; 5118 int error; 5119 5120 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5121 5122 /* Disable L0s exit timer (NMI bug workaround). */ 5123 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5124 /* Don't wait for ICH L0s (ICH bug workaround). */ 5125 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5126 5127 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5128 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5129 5130 /* Retrieve PCIe Active State Power Management (ASPM). */ 5131 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5132 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5133 if (reg & 0x02) /* L1 Entry enabled. */ 5134 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5135 else 5136 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5137 5138 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5139 5140 /* Wait for clock stabilization before accessing prph. */ 5141 if ((error = wpi_clock_wait(sc)) != 0) 5142 return error; 5143 5144 if ((error = wpi_nic_lock(sc)) != 0) 5145 return error; 5146 /* Cleanup. */ 5147 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5148 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5149 5150 /* Enable DMA and BSM (Bootstrap State Machine). */ 5151 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5152 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5153 DELAY(20); 5154 /* Disable L1-Active. 
*/ 5155 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5156 wpi_nic_unlock(sc); 5157 5158 return 0; 5159 } 5160 5161 static void 5162 wpi_apm_stop_master(struct wpi_softc *sc) 5163 { 5164 int ntries; 5165 5166 /* Stop busmaster DMA activity. */ 5167 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5168 5169 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5170 WPI_GP_CNTRL_MAC_PS) 5171 return; /* Already asleep. */ 5172 5173 for (ntries = 0; ntries < 100; ntries++) { 5174 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5175 return; 5176 DELAY(10); 5177 } 5178 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5179 __func__); 5180 } 5181 5182 static void 5183 wpi_apm_stop(struct wpi_softc *sc) 5184 { 5185 wpi_apm_stop_master(sc); 5186 5187 /* Reset the entire device. */ 5188 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5189 DELAY(10); 5190 /* Clear "initialization complete" bit. */ 5191 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5192 } 5193 5194 static void 5195 wpi_nic_config(struct wpi_softc *sc) 5196 { 5197 uint32_t rev; 5198 5199 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5200 5201 /* voodoo from the Linux "driver".. */ 5202 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5203 if ((rev & 0xc0) == 0x40) 5204 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5205 else if (!(rev & 0x80)) 5206 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5207 5208 if (sc->cap == 0x80) 5209 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5210 5211 if ((sc->rev & 0xf0) == 0xd0) 5212 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5213 else 5214 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5215 5216 if (sc->type > 1) 5217 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5218 } 5219 5220 static int 5221 wpi_hw_init(struct wpi_softc *sc) 5222 { 5223 uint8_t chnl; 5224 int ntries, error; 5225 5226 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5227 5228 /* Clear pending interrupts. */ 5229 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5230 5231 if ((error = wpi_apm_init(sc)) != 0) { 5232 device_printf(sc->sc_dev, 5233 "%s: could not power ON adapter, error %d\n", __func__, 5234 error); 5235 return error; 5236 } 5237 5238 /* Select VMAIN power source. */ 5239 if ((error = wpi_nic_lock(sc)) != 0) 5240 return error; 5241 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5242 wpi_nic_unlock(sc); 5243 /* Spin until VMAIN gets selected. */ 5244 for (ntries = 0; ntries < 5000; ntries++) { 5245 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5246 break; 5247 DELAY(10); 5248 } 5249 if (ntries == 5000) { 5250 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5251 return ETIMEDOUT; 5252 } 5253 5254 /* Perform adapter initialization. */ 5255 wpi_nic_config(sc); 5256 5257 /* Initialize RX ring. */ 5258 if ((error = wpi_nic_lock(sc)) != 0) 5259 return error; 5260 /* Set physical address of RX ring. */ 5261 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5262 /* Set physical address of RX read pointer. */ 5263 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5264 offsetof(struct wpi_shared, next)); 5265 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5266 /* Enable RX. 
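 * The single WPI_FH_RX_CONFIG write below enables RX DMA, RBD fetching
 * and status write-back, sets the MAXFRAG option, encodes the ring size
 * (WPI_RX_RING_COUNT_LOG suggests a log2 count) and routes the RX
 * interrupt to the host with a short timeout. The write pointer is
 * apparently kept to multiples of 8, hence the '& ~7' below.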
*/ 5267 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5268 WPI_FH_RX_CONFIG_DMA_ENA | 5269 WPI_FH_RX_CONFIG_RDRBD_ENA | 5270 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5271 WPI_FH_RX_CONFIG_MAXFRAG | 5272 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5273 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5274 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5275 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5276 wpi_nic_unlock(sc); 5277 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5278 5279 /* Initialize TX rings. */ 5280 if ((error = wpi_nic_lock(sc)) != 0) 5281 return error; 5282 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5283 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5284 /* Enable all 6 TX rings. */ 5285 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5286 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5287 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5288 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5289 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5290 /* Set physical address of TX rings. */ 5291 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5292 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5293 5294 /* Enable all DMA channels. */ 5295 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5296 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5297 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5298 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5299 } 5300 wpi_nic_unlock(sc); 5301 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5302 5303 /* Clear "radio off" and "commands blocked" bits. */ 5304 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5305 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5306 5307 /* Clear pending interrupts. */ 5308 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5309 /* Enable interrupts. */ 5310 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5311 5312 /* _Really_ make sure "radio off" bit is cleared! */ 5313 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5314 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5315 5316 if ((error = wpi_load_firmware(sc)) != 0) { 5317 device_printf(sc->sc_dev, 5318 "%s: could not load firmware, error %d\n", __func__, 5319 error); 5320 return error; 5321 } 5322 /* Wait at most one second for firmware alive notification. */ 5323 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5324 device_printf(sc->sc_dev, 5325 "%s: timeout waiting for adapter to initialize, error %d\n", 5326 __func__, error); 5327 return error; 5328 } 5329 5330 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5331 5332 /* Do post-firmware initialization. */ 5333 return wpi_post_alive(sc); 5334 } 5335 5336 static void 5337 wpi_hw_stop(struct wpi_softc *sc) 5338 { 5339 uint8_t chnl, qid; 5340 int ntries; 5341 5342 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5343 5344 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5345 wpi_nic_lock(sc); 5346 5347 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5348 5349 /* Disable interrupts. */ 5350 WPI_WRITE(sc, WPI_INT_MASK, 0); 5351 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5352 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5353 5354 /* Make sure we no longer hold the NIC lock. */ 5355 wpi_nic_unlock(sc); 5356 5357 if (wpi_nic_lock(sc) == 0) { 5358 /* Stop TX scheduler. */ 5359 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5360 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5361 5362 /* Stop all DMA channels. 
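 * Each channel's TX config register is cleared and the FH TX status
 * register is then polled (up to 200 x 10us) for that channel's idle
 * bit before moving on to the next one.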
*/ 5363 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5364 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5365 for (ntries = 0; ntries < 200; ntries++) { 5366 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5367 WPI_FH_TX_STATUS_IDLE(chnl)) 5368 break; 5369 DELAY(10); 5370 } 5371 } 5372 wpi_nic_unlock(sc); 5373 } 5374 5375 /* Stop RX ring. */ 5376 wpi_reset_rx_ring(sc); 5377 5378 /* Reset all TX rings. */ 5379 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 5380 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5381 5382 if (wpi_nic_lock(sc) == 0) { 5383 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5384 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5385 wpi_nic_unlock(sc); 5386 } 5387 DELAY(5); 5388 /* Power OFF adapter. */ 5389 wpi_apm_stop(sc); 5390 } 5391 5392 static void 5393 wpi_radio_on(void *arg0, int pending) 5394 { 5395 struct wpi_softc *sc = arg0; 5396 struct ieee80211com *ic = &sc->sc_ic; 5397 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5398 5399 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5400 5401 WPI_LOCK(sc); 5402 callout_stop(&sc->watchdog_rfkill); 5403 WPI_UNLOCK(sc); 5404 5405 if (vap != NULL) 5406 ieee80211_init(vap); 5407 } 5408 5409 static void 5410 wpi_radio_off(void *arg0, int pending) 5411 { 5412 struct wpi_softc *sc = arg0; 5413 struct ieee80211com *ic = &sc->sc_ic; 5414 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5415 5416 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5417 5418 ieee80211_notify_radio(ic, 0); 5419 wpi_stop(sc); 5420 if (vap != NULL) 5421 ieee80211_stop(vap); 5422 5423 WPI_LOCK(sc); 5424 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5425 WPI_UNLOCK(sc); 5426 } 5427 5428 static int 5429 wpi_init(struct wpi_softc *sc) 5430 { 5431 int error = 0; 5432 5433 WPI_LOCK(sc); 5434 5435 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5436 5437 if (sc->sc_running != 0) 5438 goto end; 5439 5440 /* Check that the radio is not disabled by hardware switch. */ 5441 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5442 device_printf(sc->sc_dev, 5443 "RF switch: radio disabled (%s)\n", __func__); 5444 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5445 sc); 5446 error = EINPROGRESS; 5447 goto end; 5448 } 5449 5450 /* Read firmware images from the filesystem. */ 5451 if ((error = wpi_read_firmware(sc)) != 0) { 5452 device_printf(sc->sc_dev, 5453 "%s: could not read firmware, error %d\n", __func__, 5454 error); 5455 goto end; 5456 } 5457 5458 sc->sc_running = 1; 5459 5460 /* Initialize hardware and upload firmware. */ 5461 error = wpi_hw_init(sc); 5462 wpi_unload_firmware(sc); 5463 if (error != 0) { 5464 device_printf(sc->sc_dev, 5465 "%s: could not initialize hardware, error %d\n", __func__, 5466 error); 5467 goto fail; 5468 } 5469 5470 /* Configure adapter now that it is ready. 
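 * wpi_config() (defined earlier) selects CAM power saving, programs
 * bluetooth coexistence, builds and sends the initial RXON for the
 * current channel and opmode, and runs wpi_mrr_setup() for rate control.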
*/ 5471 if ((error = wpi_config(sc)) != 0) { 5472 device_printf(sc->sc_dev, 5473 "%s: could not configure device, error %d\n", __func__, 5474 error); 5475 goto fail; 5476 } 5477 5478 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5479 5480 WPI_UNLOCK(sc); 5481 5482 return 0; 5483 5484 fail: wpi_stop_locked(sc); 5485 5486 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5487 WPI_UNLOCK(sc); 5488 5489 return error; 5490 } 5491 5492 static void 5493 wpi_stop_locked(struct wpi_softc *sc) 5494 { 5495 5496 WPI_LOCK_ASSERT(sc); 5497 5498 if (sc->sc_running == 0) 5499 return; 5500 5501 WPI_TX_LOCK(sc); 5502 WPI_TXQ_LOCK(sc); 5503 sc->sc_running = 0; 5504 WPI_TXQ_UNLOCK(sc); 5505 WPI_TX_UNLOCK(sc); 5506 5507 WPI_TXQ_STATE_LOCK(sc); 5508 callout_stop(&sc->tx_timeout); 5509 WPI_TXQ_STATE_UNLOCK(sc); 5510 5511 WPI_RXON_LOCK(sc); 5512 callout_stop(&sc->scan_timeout); 5513 callout_stop(&sc->calib_to); 5514 WPI_RXON_UNLOCK(sc); 5515 5516 /* Power OFF hardware. */ 5517 wpi_hw_stop(sc); 5518 } 5519 5520 static void 5521 wpi_stop(struct wpi_softc *sc) 5522 { 5523 WPI_LOCK(sc); 5524 wpi_stop_locked(sc); 5525 WPI_UNLOCK(sc); 5526 } 5527 5528 /* 5529 * Callback from net80211 to start a scan. 5530 */ 5531 static void 5532 wpi_scan_start(struct ieee80211com *ic) 5533 { 5534 struct wpi_softc *sc = ic->ic_softc; 5535 5536 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5537 } 5538 5539 /* 5540 * Callback from net80211 to terminate a scan. 5541 */ 5542 static void 5543 wpi_scan_end(struct ieee80211com *ic) 5544 { 5545 struct wpi_softc *sc = ic->ic_softc; 5546 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5547 5548 if (vap->iv_state == IEEE80211_S_RUN) 5549 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5550 } 5551 5552 /** 5553 * Called by the net80211 framework to indicate to the driver 5554 * that the channel should be changed. 5555 */ 5556 static void 5557 wpi_set_channel(struct ieee80211com *ic) 5558 { 5559 const struct ieee80211_channel *c = ic->ic_curchan; 5560 struct wpi_softc *sc = ic->ic_softc; 5561 int error; 5562 5563 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5564 5565 WPI_LOCK(sc); 5566 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5567 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5568 WPI_UNLOCK(sc); 5569 WPI_TX_LOCK(sc); 5570 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5571 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5572 WPI_TX_UNLOCK(sc); 5573 5574 /* 5575 * Only need to set the channel in Monitor mode. AP scanning and auth 5576 * are already taken care of by their respective firmware commands. 5577 */ 5578 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5579 WPI_RXON_LOCK(sc); 5580 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5581 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5582 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5583 WPI_RXON_24GHZ); 5584 } else { 5585 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5586 WPI_RXON_24GHZ); 5587 } 5588 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5589 device_printf(sc->sc_dev, 5590 "%s: error %d setting channel\n", __func__, 5591 error); 5592 WPI_RXON_UNLOCK(sc); 5593 } 5594 } 5595 5596 /** 5597 * Called by net80211 to indicate that we need to scan the current 5598 * channel. The channel has previously been set via the wpi_set_channel 5599 * callback.
5600 */ 5601 static void 5602 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5603 { 5604 struct ieee80211vap *vap = ss->ss_vap; 5605 struct ieee80211com *ic = vap->iv_ic; 5606 struct wpi_softc *sc = ic->ic_softc; 5607 int error; 5608 5609 WPI_RXON_LOCK(sc); 5610 error = wpi_scan(sc, ic->ic_curchan); 5611 WPI_RXON_UNLOCK(sc); 5612 if (error != 0) 5613 ieee80211_cancel_scan(vap); 5614 } 5615 5616 /** 5617 * Called by the net80211 framework to indicate 5618 * the minimum dwell time has been met, terminate the scan. 5619 * We don't actually terminate the scan as the firmware will notify 5620 * us when it's finished and we have no way to interrupt it. 5621 */ 5622 static void 5623 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5624 { 5625 /* NB: don't try to abort scan; wait for firmware to finish */ 5626 } 5627 5628 static void 5629 wpi_hw_reset(void *arg, int pending) 5630 { 5631 struct wpi_softc *sc = arg; 5632 struct ieee80211com *ic = &sc->sc_ic; 5633 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5634 5635 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5636 5637 ieee80211_notify_radio(ic, 0); 5638 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN)) 5639 ieee80211_cancel_scan(vap); 5640 5641 wpi_stop(sc); 5642 if (vap != NULL) { 5643 ieee80211_stop(vap); 5644 ieee80211_init(vap); 5645 } 5646 } 5647