/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware as many
 * other adapters do. Instead, at run time the EEPROM is set into a known
 * state and told to load boot firmware. The boot firmware loads an init and
 * a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and hardware communicate by way
 * of circular DMA rings in SRAM.
 *
 * There are six memory rings: one command ring, one RX data ring and four
 * TX data rings. The four TX data rings allow for QoS prioritization.
 *
 * The RX data ring consists of 32 DMA buffers. Two registers are used to
 * indicate where in the ring the driver and the firmware are up to. The
 * driver sets the initial read index (reg1) and the initial write index
 * (reg2); the firmware updates the read index (reg1) on receipt of a packet
 * and fires an interrupt. The driver then processes the buffers starting at
 * reg1, indicating to the firmware which buffers have been consumed by
 * updating reg2, and allocates new memory for each processed buffer.
 *
 * A similar thing happens with the TX rings. The difference is that the
 * firmware stops processing buffers once the queue is full, and resumes
 * only after confirmation of a successful transmission (tx_done) has
 * occurred.
 *
 * The command ring operates in the same manner as the TX queues.
 *
 * All communication directly with the card (e.g. the EEPROM) is classed as
 * Stage 1 communication.
 *
 * All communication via the firmware to the card is classed as Stage 2.
 * The firmware consists of two parts: a bootstrap firmware and a runtime
 * firmware. The bootstrap firmware and runtime firmware are loaded from
 * host memory to the card via DMA and then told to execute. From this point
 * on, the majority of communication between the driver and the card goes
 * via the firmware.
 */
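
/*
 * Illustrative sketch only (not part of the driver): the RX ring handshake
 * described above boils down to consuming entries between the index the
 * firmware last wrote (reg1) and the driver's own index, then publishing
 * the new driver index (reg2) so the firmware may reuse those buffers.
 * All names below are hypothetical and stand in for the real registers and
 * shared-page fields used later in this file.
 *
 *	hw = shared_rx_read_index;		// reg1, updated by firmware
 *	while (drv != hw) {
 *		process_and_replenish(&ring[drv]);
 *		drv = (drv + 1) % WPI_RX_RING_COUNT;
 *	}
 *	write_rx_write_index(drv);		// reg2, tells firmware the
 *						// buffers are free again
 */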

#include "opt_wlan.h"
#include "opt_wpi.h"

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
#include <dev/wpi/if_wpi_debug.h>

struct wpi_ident {
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subdevice;
	const char	*name;
};

static const struct wpi_ident wpi_ident_table[] = {
	/* The below entries support ABG regardless of the subid */
	{ 0x8086, 0x4222,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	{ 0x8086, 0x4227,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
	/* The below entries only support BG */
	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
	{ 0, 0, 0, NULL }
};

static int	wpi_probe(device_t);
static int	wpi_attach(device_t);
static void	wpi_radiotap_attach(struct wpi_softc *);
static void	wpi_sysctlattach(struct wpi_softc *);
static void	wpi_init_beacon(struct wpi_vap *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	wpi_vap_delete(struct ieee80211vap *);
static int	wpi_detach(device_t);
static int	wpi_shutdown(device_t);
static int	wpi_suspend(device_t);
static int	wpi_resume(device_t);
static int	wpi_nic_lock(struct wpi_softc *);
static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	wpi_dma_contig_free(struct wpi_dma_info *);
static int	wpi_alloc_shared(struct wpi_softc *);
static void	wpi_free_shared(struct wpi_softc *);
static int	wpi_alloc_fwmem(struct wpi_softc *);
static void	wpi_free_fwmem(struct wpi_softc *);
static int	wpi_alloc_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring_ps(struct wpi_softc *);
static void	wpi_reset_rx_ring(struct wpi_softc *);
static void	wpi_free_rx_ring(struct wpi_softc *);
static int	wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
		    uint8_t);
static void	wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void	wpi_update_tx_ring_ps(struct wpi_softc *,
		    struct wpi_tx_ring *);
static void	wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void	wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static int	wpi_read_eeprom(struct wpi_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static uint32_t	wpi_eeprom_channel_flags(struct wpi_eeprom_chan *);
static void	wpi_read_eeprom_band(struct wpi_softc *, uint8_t);
static int	wpi_read_eeprom_channels(struct wpi_softc *, uint8_t);
static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *,
		    struct ieee80211_channel *);
static int	wpi_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel[]);
static int	wpi_read_eeprom_group(struct wpi_softc *, uint8_t);
static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	wpi_node_free(struct ieee80211_node *);
static void	wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
		    const struct ieee80211_rx_stats *,
		    int, int);
static void	wpi_restore_node(void *, struct ieee80211_node *);
static void	wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *);
static int	wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	wpi_calib_timeout(void *);
static void	wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *,
		    struct wpi_rx_data *);
static void	wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *,
		    struct wpi_rx_data *);
static void	wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *);
static void	wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *);
static void	wpi_notif_intr(struct wpi_softc *);
static void	wpi_wakeup_intr(struct wpi_softc *);
#ifdef WPI_DEBUG
static void	wpi_debug_registers(struct wpi_softc *);
#endif
static void	wpi_fatal_intr(struct wpi_softc *);
static void	wpi_intr(void *);
static void	wpi_free_txfrags(struct wpi_softc *, uint16_t);
static int	wpi_cmd2(struct wpi_softc *, struct wpi_buf *);
static int	wpi_tx_data(struct wpi_softc *, struct mbuf *,
		    struct ieee80211_node *);
static int	wpi_tx_data_raw(struct wpi_softc *, struct mbuf *,
		    struct ieee80211_node *,
		    const struct ieee80211_bpf_params *);
static int	wpi_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static int	wpi_transmit(struct ieee80211com *, struct mbuf *);
static void	wpi_watchdog_rfkill(void *);
static void	wpi_scan_timeout(void *);
static void	wpi_tx_timeout(void *);
static void	wpi_parent(struct ieee80211com *);
static int	wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t,
		    int);
static int	wpi_mrr_setup(struct wpi_softc *);
static int	wpi_add_node(struct wpi_softc *, struct ieee80211_node *);
static int	wpi_add_broadcast_node(struct wpi_softc *, int);
static int	wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *);
static void	wpi_del_node(struct wpi_softc *, struct ieee80211_node *);
static int	wpi_updateedca(struct ieee80211com *);
static void	wpi_set_promisc(struct wpi_softc *);
static void	wpi_update_promisc(struct ieee80211com *);
static void	wpi_update_mcast(struct ieee80211com *);
static void	wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t);
static int	wpi_set_timing(struct wpi_softc *, struct ieee80211_node *);
static void	wpi_power_calibration(struct wpi_softc *);
static int	wpi_set_txpower(struct wpi_softc *, int);
static int	wpi_get_power_index(struct wpi_softc *,
		    struct wpi_power_group *, uint8_t, int, int);
static int	wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int);
static int	wpi_send_btcoex(struct wpi_softc *);
static int	wpi_send_rxon(struct wpi_softc *, int, int);
static int	wpi_config(struct wpi_softc *);
static uint16_t	wpi_get_active_dwell_time(struct wpi_softc *,
		    struct ieee80211_channel *, uint8_t);
static uint16_t	wpi_limit_dwell(struct wpi_softc *, uint16_t);
static uint16_t	wpi_get_passive_dwell_time(struct wpi_softc *,
		    struct ieee80211_channel *);
static uint32_t	wpi_get_scan_pause_time(uint32_t, uint16_t);
static int	wpi_scan(struct wpi_softc *, struct ieee80211_channel *);
static int	wpi_auth(struct wpi_softc *, struct ieee80211vap *);
static int	wpi_config_beacon(struct wpi_vap *);
static int	wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
static void	wpi_update_beacon(struct ieee80211vap *, int);
static void	wpi_newassoc(struct ieee80211_node *, int);
static int	wpi_run(struct wpi_softc *, struct ieee80211vap *);
static int	wpi_load_key(struct ieee80211_node *,
		    const struct ieee80211_key *);
static void	wpi_load_key_cb(void *, struct ieee80211_node *);
static int	wpi_set_global_keys(struct ieee80211_node *);
static int	wpi_del_key(struct ieee80211_node *,
		    const struct ieee80211_key *);
static void	wpi_del_key_cb(void *, struct ieee80211_node *);
static int	wpi_process_key(struct ieee80211vap *,
		    const struct ieee80211_key *, int);
static int	wpi_key_set(struct ieee80211vap *,
		    const struct ieee80211_key *);
static int	wpi_key_delete(struct ieee80211vap *,
		    const struct ieee80211_key *);
static int	wpi_post_alive(struct wpi_softc *);
static int	wpi_load_bootcode(struct wpi_softc *, const uint8_t *,
		    uint32_t);
static int	wpi_load_firmware(struct wpi_softc *);
static int	wpi_read_firmware(struct wpi_softc *);
static void	wpi_unload_firmware(struct wpi_softc *);
static int	wpi_clock_wait(struct wpi_softc *);
static int	wpi_apm_init(struct wpi_softc *);
static void	wpi_apm_stop_master(struct wpi_softc *);
static void	wpi_apm_stop(struct wpi_softc *);
static void	wpi_nic_config(struct wpi_softc *);
static int	wpi_hw_init(struct wpi_softc *);
static void	wpi_hw_stop(struct wpi_softc *);
static void	wpi_radio_on(void *, int);
static void	wpi_radio_off(void *, int);
static int	wpi_init(struct wpi_softc *);
static void	wpi_stop_locked(struct wpi_softc *);
static void	wpi_stop(struct wpi_softc *);
static void	wpi_scan_start(struct ieee80211com *);
static void	wpi_scan_end(struct ieee80211com *);
static void	wpi_set_channel(struct ieee80211com *);
static void	wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	wpi_scan_mindwell(struct ieee80211_scan_state *);
static void	wpi_hw_reset(void *, int);

static device_method_t wpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		wpi_probe),
	DEVMETHOD(device_attach,	wpi_attach),
	DEVMETHOD(device_detach,	wpi_detach),
	DEVMETHOD(device_shutdown,	wpi_shutdown),
	DEVMETHOD(device_suspend,	wpi_suspend),
	DEVMETHOD(device_resume,	wpi_resume),

	DEVMETHOD_END
};

static driver_t wpi_driver = {
	"wpi",
	wpi_methods,
	sizeof (struct wpi_softc)
};
static devclass_t wpi_devclass;

DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);

MODULE_VERSION(wpi, 1);

MODULE_DEPEND(wpi, pci, 1, 1, 1);
MODULE_DEPEND(wpi, wlan, 1, 1, 1);
MODULE_DEPEND(wpi, firmware, 1, 1, 1);

static int
wpi_probe(device_t dev)
{
	const struct wpi_ident *ident;

	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
		if (pci_get_vendor(dev) == ident->vendor &&
		    pci_get_device(dev) == ident->device) {
			device_set_desc(dev, ident->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return ENXIO;
}

static int
wpi_attach(device_t dev)
{
	struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
	struct ieee80211com *ic;
	uint8_t i;
	int error, rid;
#ifdef WPI_DEBUG
	int supportsa = 1;
	const struct wpi_ident *ident;
#endif

	sc->sc_dev = dev;

#ifdef WPI_DEBUG
	error = resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
	if (error != 0)
		sc->sc_debug = 0;
#else
	sc->sc_debug = 0;
#endif

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return error;
	}

	/*
	 * Some cards only support 802.11b/g, not 802.11a; check whether
	 * this is one such card. A 0x0 in the subdevice table indicates
	 * that the entire subdevice range is to be ignored.
	 */
#ifdef WPI_DEBUG
	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
		if (ident->subdevice &&
		    pci_get_subdevice(dev) == ident->subdevice) {
			supportsa = 0;
			break;
		}
	}
#endif

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "can't map mem space\n");
		return ENOMEM;
	}
	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	rid = 1;
	if (pci_alloc_msi(dev, &rid) == 0)
		rid = 1;
	else
		rid = 0;
	/* Install interrupt handler. */
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		error = ENOMEM;
		goto fail;
	}

	WPI_LOCK_INIT(sc);
	WPI_TX_LOCK_INIT(sc);
	WPI_RXON_LOCK_INIT(sc);
	WPI_NT_LOCK_INIT(sc);
	WPI_TXQ_LOCK_INIT(sc);
	WPI_TXQ_STATE_LOCK_INIT(sc);

	/* Allocate DMA memory for firmware transfers. */
	if ((error = wpi_alloc_fwmem(sc)) != 0) {
		device_printf(dev,
		    "could not allocate memory for firmware, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate shared page. */
	if ((error = wpi_alloc_shared(sc)) != 0) {
		device_printf(dev, "could not allocate shared page\n");
		goto fail;
	}

	/* Allocate TX rings - 4 for QoS purposes, 1 for commands.
*/ 429 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 430 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 431 device_printf(dev, 432 "could not allocate TX ring %d, error %d\n", i, 433 error); 434 goto fail; 435 } 436 } 437 438 /* Allocate RX ring. */ 439 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 440 device_printf(dev, "could not allocate RX ring, error %d\n", 441 error); 442 goto fail; 443 } 444 445 /* Clear pending interrupts. */ 446 WPI_WRITE(sc, WPI_INT, 0xffffffff); 447 448 ic = &sc->sc_ic; 449 ic->ic_softc = sc; 450 ic->ic_name = device_get_nameunit(dev); 451 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 452 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 453 454 /* Set device capabilities. */ 455 ic->ic_caps = 456 IEEE80211_C_STA /* station mode supported */ 457 | IEEE80211_C_IBSS /* IBSS mode supported */ 458 | IEEE80211_C_HOSTAP /* Host access point mode */ 459 | IEEE80211_C_MONITOR /* monitor mode supported */ 460 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 461 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 462 | IEEE80211_C_TXFRAG /* handle tx frags */ 463 | IEEE80211_C_TXPMGT /* tx power management */ 464 | IEEE80211_C_SHSLOT /* short slot time supported */ 465 | IEEE80211_C_WPA /* 802.11i */ 466 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 467 | IEEE80211_C_WME /* 802.11e */ 468 | IEEE80211_C_PMGT /* Station-side power mgmt */ 469 ; 470 471 ic->ic_cryptocaps = 472 IEEE80211_CRYPTO_AES_CCM; 473 474 /* 475 * Read in the eeprom and also setup the channels for 476 * net80211. We don't set the rates as net80211 does this for us 477 */ 478 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 479 device_printf(dev, "could not read EEPROM, error %d\n", 480 error); 481 goto fail; 482 } 483 484 #ifdef WPI_DEBUG 485 if (bootverbose) { 486 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 487 sc->domain); 488 device_printf(sc->sc_dev, "Hardware Type: %c\n", 489 sc->type > 1 ? 'B': '?'); 490 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 491 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 492 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 493 supportsa ? "does" : "does not"); 494 495 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 496 check what sc->rev really represents - benjsc 20070615 */ 497 } 498 #endif 499 500 ieee80211_ifattach(ic); 501 ic->ic_vap_create = wpi_vap_create; 502 ic->ic_vap_delete = wpi_vap_delete; 503 ic->ic_parent = wpi_parent; 504 ic->ic_raw_xmit = wpi_raw_xmit; 505 ic->ic_transmit = wpi_transmit; 506 ic->ic_node_alloc = wpi_node_alloc; 507 sc->sc_node_free = ic->ic_node_free; 508 ic->ic_node_free = wpi_node_free; 509 ic->ic_wme.wme_update = wpi_updateedca; 510 ic->ic_update_promisc = wpi_update_promisc; 511 ic->ic_update_mcast = wpi_update_mcast; 512 ic->ic_newassoc = wpi_newassoc; 513 ic->ic_scan_start = wpi_scan_start; 514 ic->ic_scan_end = wpi_scan_end; 515 ic->ic_set_channel = wpi_set_channel; 516 ic->ic_scan_curchan = wpi_scan_curchan; 517 ic->ic_scan_mindwell = wpi_scan_mindwell; 518 ic->ic_setregdomain = wpi_setregdomain; 519 520 sc->sc_update_rx_ring = wpi_update_rx_ring; 521 sc->sc_update_tx_ring = wpi_update_tx_ring; 522 523 wpi_radiotap_attach(sc); 524 525 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 526 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 527 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 528 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 529 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 530 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 531 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 532 533 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 534 taskqueue_thread_enqueue, &sc->sc_tq); 535 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 536 if (error != 0) { 537 device_printf(dev, "can't start threads, error %d\n", error); 538 goto fail; 539 } 540 541 wpi_sysctlattach(sc); 542 543 /* 544 * Hook our interrupt after all initialization is complete. 545 */ 546 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 547 NULL, wpi_intr, sc, &sc->sc_ih); 548 if (error != 0) { 549 device_printf(dev, "can't establish interrupt, error %d\n", 550 error); 551 goto fail; 552 } 553 554 if (bootverbose) 555 ieee80211_announce(ic); 556 557 #ifdef WPI_DEBUG 558 if (sc->sc_debug & WPI_DEBUG_HW) 559 ieee80211_announce_channels(ic); 560 #endif 561 562 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 563 return 0; 564 565 fail: wpi_detach(dev); 566 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 567 return error; 568 } 569 570 /* 571 * Attach the interface to 802.11 radiotap. 
572 */ 573 static void 574 wpi_radiotap_attach(struct wpi_softc *sc) 575 { 576 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 577 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 578 579 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 580 ieee80211_radiotap_attach(&sc->sc_ic, 581 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 582 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 583 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 584 } 585 586 static void 587 wpi_sysctlattach(struct wpi_softc *sc) 588 { 589 #ifdef WPI_DEBUG 590 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 591 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 592 593 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 594 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 595 "control debugging printfs"); 596 #endif 597 } 598 599 static void 600 wpi_init_beacon(struct wpi_vap *wvp) 601 { 602 struct wpi_buf *bcn = &wvp->wv_bcbuf; 603 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 604 605 cmd->id = WPI_ID_BROADCAST; 606 cmd->ofdm_mask = 0xff; 607 cmd->cck_mask = 0x0f; 608 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 609 610 /* 611 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 612 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 613 */ 614 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 615 616 bcn->code = WPI_CMD_SET_BEACON; 617 bcn->ac = WPI_CMD_QUEUE_NUM; 618 bcn->size = sizeof(struct wpi_cmd_beacon); 619 } 620 621 static struct ieee80211vap * 622 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 623 enum ieee80211_opmode opmode, int flags, 624 const uint8_t bssid[IEEE80211_ADDR_LEN], 625 const uint8_t mac[IEEE80211_ADDR_LEN]) 626 { 627 struct wpi_vap *wvp; 628 struct ieee80211vap *vap; 629 630 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 631 return NULL; 632 633 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 634 vap = &wvp->wv_vap; 635 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 636 637 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 638 WPI_VAP_LOCK_INIT(wvp); 639 wpi_init_beacon(wvp); 640 } 641 642 /* Override with driver methods. */ 643 vap->iv_key_set = wpi_key_set; 644 vap->iv_key_delete = wpi_key_delete; 645 if (opmode == IEEE80211_M_IBSS) { 646 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 647 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; 648 } 649 wvp->wv_newstate = vap->iv_newstate; 650 vap->iv_newstate = wpi_newstate; 651 vap->iv_update_beacon = wpi_update_beacon; 652 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 653 654 ieee80211_ratectl_init(vap); 655 /* Complete setup. 
*/ 656 ieee80211_vap_attach(vap, ieee80211_media_change, 657 ieee80211_media_status, mac); 658 ic->ic_opmode = opmode; 659 return vap; 660 } 661 662 static void 663 wpi_vap_delete(struct ieee80211vap *vap) 664 { 665 struct wpi_vap *wvp = WPI_VAP(vap); 666 struct wpi_buf *bcn = &wvp->wv_bcbuf; 667 enum ieee80211_opmode opmode = vap->iv_opmode; 668 669 ieee80211_ratectl_deinit(vap); 670 ieee80211_vap_detach(vap); 671 672 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 673 if (bcn->m != NULL) 674 m_freem(bcn->m); 675 676 WPI_VAP_LOCK_DESTROY(wvp); 677 } 678 679 free(wvp, M_80211_VAP); 680 } 681 682 static int 683 wpi_detach(device_t dev) 684 { 685 struct wpi_softc *sc = device_get_softc(dev); 686 struct ieee80211com *ic = &sc->sc_ic; 687 uint8_t qid; 688 689 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 690 691 if (ic->ic_vap_create == wpi_vap_create) { 692 ieee80211_draintask(ic, &sc->sc_radioon_task); 693 694 wpi_stop(sc); 695 696 if (sc->sc_tq != NULL) { 697 taskqueue_drain_all(sc->sc_tq); 698 taskqueue_free(sc->sc_tq); 699 } 700 701 callout_drain(&sc->watchdog_rfkill); 702 callout_drain(&sc->tx_timeout); 703 callout_drain(&sc->scan_timeout); 704 callout_drain(&sc->calib_to); 705 ieee80211_ifdetach(ic); 706 } 707 708 /* Uninstall interrupt handler. */ 709 if (sc->irq != NULL) { 710 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 711 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 712 sc->irq); 713 pci_release_msi(dev); 714 } 715 716 if (sc->txq[0].data_dmat) { 717 /* Free DMA resources. */ 718 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 719 wpi_free_tx_ring(sc, &sc->txq[qid]); 720 721 wpi_free_rx_ring(sc); 722 wpi_free_shared(sc); 723 } 724 725 if (sc->fw_dma.tag) 726 wpi_free_fwmem(sc); 727 728 if (sc->mem != NULL) 729 bus_release_resource(dev, SYS_RES_MEMORY, 730 rman_get_rid(sc->mem), sc->mem); 731 732 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 733 WPI_TXQ_STATE_LOCK_DESTROY(sc); 734 WPI_TXQ_LOCK_DESTROY(sc); 735 WPI_NT_LOCK_DESTROY(sc); 736 WPI_RXON_LOCK_DESTROY(sc); 737 WPI_TX_LOCK_DESTROY(sc); 738 WPI_LOCK_DESTROY(sc); 739 return 0; 740 } 741 742 static int 743 wpi_shutdown(device_t dev) 744 { 745 struct wpi_softc *sc = device_get_softc(dev); 746 747 wpi_stop(sc); 748 return 0; 749 } 750 751 static int 752 wpi_suspend(device_t dev) 753 { 754 struct wpi_softc *sc = device_get_softc(dev); 755 struct ieee80211com *ic = &sc->sc_ic; 756 757 ieee80211_suspend_all(ic); 758 return 0; 759 } 760 761 static int 762 wpi_resume(device_t dev) 763 { 764 struct wpi_softc *sc = device_get_softc(dev); 765 struct ieee80211com *ic = &sc->sc_ic; 766 767 /* Clear device-specific "PCI retry timeout" register (41h). */ 768 pci_write_config(dev, 0x41, 0, 1); 769 770 ieee80211_resume_all(ic); 771 return 0; 772 } 773 774 /* 775 * Grab exclusive access to NIC memory. 776 */ 777 static int 778 wpi_nic_lock(struct wpi_softc *sc) 779 { 780 int ntries; 781 782 /* Request exclusive access to NIC. */ 783 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 784 785 /* Spin until we actually get the lock. */ 786 for (ntries = 0; ntries < 1000; ntries++) { 787 if ((WPI_READ(sc, WPI_GP_CNTRL) & 788 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 789 WPI_GP_CNTRL_MAC_ACCESS_ENA) 790 return 0; 791 DELAY(10); 792 } 793 794 device_printf(sc->sc_dev, "could not lock memory\n"); 795 796 return ETIMEDOUT; 797 } 798 799 /* 800 * Release lock on NIC memory. 
801 */ 802 static __inline void 803 wpi_nic_unlock(struct wpi_softc *sc) 804 { 805 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 806 } 807 808 static __inline uint32_t 809 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 810 { 811 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 812 WPI_BARRIER_READ_WRITE(sc); 813 return WPI_READ(sc, WPI_PRPH_RDATA); 814 } 815 816 static __inline void 817 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 818 { 819 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 820 WPI_BARRIER_WRITE(sc); 821 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 822 } 823 824 static __inline void 825 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 826 { 827 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 828 } 829 830 static __inline void 831 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 832 { 833 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 834 } 835 836 static __inline void 837 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 838 const uint32_t *data, uint32_t count) 839 { 840 for (; count != 0; count--, data++, addr += 4) 841 wpi_prph_write(sc, addr, *data); 842 } 843 844 static __inline uint32_t 845 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 846 { 847 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 848 WPI_BARRIER_READ_WRITE(sc); 849 return WPI_READ(sc, WPI_MEM_RDATA); 850 } 851 852 static __inline void 853 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 854 int count) 855 { 856 for (; count > 0; count--, addr += 4) 857 *data++ = wpi_mem_read(sc, addr); 858 } 859 860 static int 861 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 862 { 863 uint8_t *out = data; 864 uint32_t val; 865 int error, ntries; 866 867 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 868 869 if ((error = wpi_nic_lock(sc)) != 0) 870 return error; 871 872 for (; count > 0; count -= 2, addr++) { 873 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 874 for (ntries = 0; ntries < 10; ntries++) { 875 val = WPI_READ(sc, WPI_EEPROM); 876 if (val & WPI_EEPROM_READ_VALID) 877 break; 878 DELAY(5); 879 } 880 if (ntries == 10) { 881 device_printf(sc->sc_dev, 882 "timeout reading ROM at 0x%x\n", addr); 883 return ETIMEDOUT; 884 } 885 *out++= val >> 16; 886 if (count > 1) 887 *out ++= val >> 24; 888 } 889 890 wpi_nic_unlock(sc); 891 892 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 893 894 return 0; 895 } 896 897 static void 898 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 899 { 900 if (error != 0) 901 return; 902 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 903 *(bus_addr_t *)arg = segs[0].ds_addr; 904 } 905 906 /* 907 * Allocates a contiguous block of dma memory of the requested size and 908 * alignment. 
909 */ 910 static int 911 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 912 void **kvap, bus_size_t size, bus_size_t alignment) 913 { 914 int error; 915 916 dma->tag = NULL; 917 dma->size = size; 918 919 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 920 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 921 1, size, 0, NULL, NULL, &dma->tag); 922 if (error != 0) 923 goto fail; 924 925 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 926 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 927 if (error != 0) 928 goto fail; 929 930 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 931 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 932 if (error != 0) 933 goto fail; 934 935 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 936 937 if (kvap != NULL) 938 *kvap = dma->vaddr; 939 940 return 0; 941 942 fail: wpi_dma_contig_free(dma); 943 return error; 944 } 945 946 static void 947 wpi_dma_contig_free(struct wpi_dma_info *dma) 948 { 949 if (dma->vaddr != NULL) { 950 bus_dmamap_sync(dma->tag, dma->map, 951 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 952 bus_dmamap_unload(dma->tag, dma->map); 953 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 954 dma->vaddr = NULL; 955 } 956 if (dma->tag != NULL) { 957 bus_dma_tag_destroy(dma->tag); 958 dma->tag = NULL; 959 } 960 } 961 962 /* 963 * Allocate a shared page between host and NIC. 964 */ 965 static int 966 wpi_alloc_shared(struct wpi_softc *sc) 967 { 968 /* Shared buffer must be aligned on a 4KB boundary. */ 969 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 970 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 971 } 972 973 static void 974 wpi_free_shared(struct wpi_softc *sc) 975 { 976 wpi_dma_contig_free(&sc->shared_dma); 977 } 978 979 /* 980 * Allocate DMA-safe memory for firmware transfer. 981 */ 982 static int 983 wpi_alloc_fwmem(struct wpi_softc *sc) 984 { 985 /* Must be aligned on a 16-byte boundary. */ 986 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 987 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 988 } 989 990 static void 991 wpi_free_fwmem(struct wpi_softc *sc) 992 { 993 wpi_dma_contig_free(&sc->fw_dma); 994 } 995 996 static int 997 wpi_alloc_rx_ring(struct wpi_softc *sc) 998 { 999 struct wpi_rx_ring *ring = &sc->rxq; 1000 bus_size_t size; 1001 int i, error; 1002 1003 ring->cur = 0; 1004 ring->update = 0; 1005 1006 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1007 1008 /* Allocate RX descriptors (16KB aligned.) */ 1009 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1010 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1011 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1012 if (error != 0) { 1013 device_printf(sc->sc_dev, 1014 "%s: could not allocate RX ring DMA memory, error %d\n", 1015 __func__, error); 1016 goto fail; 1017 } 1018 1019 /* Create RX buffer DMA tag. */ 1020 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1021 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1022 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat); 1023 if (error != 0) { 1024 device_printf(sc->sc_dev, 1025 "%s: could not create RX buf DMA tag, error %d\n", 1026 __func__, error); 1027 goto fail; 1028 } 1029 1030 /* 1031 * Allocate and map RX buffers. 
1032 */ 1033 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1034 struct wpi_rx_data *data = &ring->data[i]; 1035 bus_addr_t paddr; 1036 1037 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1038 if (error != 0) { 1039 device_printf(sc->sc_dev, 1040 "%s: could not create RX buf DMA map, error %d\n", 1041 __func__, error); 1042 goto fail; 1043 } 1044 1045 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1046 if (data->m == NULL) { 1047 device_printf(sc->sc_dev, 1048 "%s: could not allocate RX mbuf\n", __func__); 1049 error = ENOBUFS; 1050 goto fail; 1051 } 1052 1053 error = bus_dmamap_load(ring->data_dmat, data->map, 1054 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1055 &paddr, BUS_DMA_NOWAIT); 1056 if (error != 0 && error != EFBIG) { 1057 device_printf(sc->sc_dev, 1058 "%s: can't map mbuf (error %d)\n", __func__, 1059 error); 1060 goto fail; 1061 } 1062 1063 /* Set physical address of RX buffer. */ 1064 ring->desc[i] = htole32(paddr); 1065 } 1066 1067 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1068 BUS_DMASYNC_PREWRITE); 1069 1070 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1071 1072 return 0; 1073 1074 fail: wpi_free_rx_ring(sc); 1075 1076 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1077 1078 return error; 1079 } 1080 1081 static void 1082 wpi_update_rx_ring(struct wpi_softc *sc) 1083 { 1084 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1085 } 1086 1087 static void 1088 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1089 { 1090 struct wpi_rx_ring *ring = &sc->rxq; 1091 1092 if (ring->update != 0) { 1093 /* Wait for INT_WAKEUP event. */ 1094 return; 1095 } 1096 1097 WPI_TXQ_LOCK(sc); 1098 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1099 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1100 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1101 __func__); 1102 ring->update = 1; 1103 } else { 1104 wpi_update_rx_ring(sc); 1105 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1106 } 1107 WPI_TXQ_UNLOCK(sc); 1108 } 1109 1110 static void 1111 wpi_reset_rx_ring(struct wpi_softc *sc) 1112 { 1113 struct wpi_rx_ring *ring = &sc->rxq; 1114 int ntries; 1115 1116 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1117 1118 if (wpi_nic_lock(sc) == 0) { 1119 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1120 for (ntries = 0; ntries < 1000; ntries++) { 1121 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1122 WPI_FH_RX_STATUS_IDLE) 1123 break; 1124 DELAY(10); 1125 } 1126 wpi_nic_unlock(sc); 1127 } 1128 1129 ring->cur = 0; 1130 ring->update = 0; 1131 } 1132 1133 static void 1134 wpi_free_rx_ring(struct wpi_softc *sc) 1135 { 1136 struct wpi_rx_ring *ring = &sc->rxq; 1137 int i; 1138 1139 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1140 1141 wpi_dma_contig_free(&ring->desc_dma); 1142 1143 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1144 struct wpi_rx_data *data = &ring->data[i]; 1145 1146 if (data->m != NULL) { 1147 bus_dmamap_sync(ring->data_dmat, data->map, 1148 BUS_DMASYNC_POSTREAD); 1149 bus_dmamap_unload(ring->data_dmat, data->map); 1150 m_freem(data->m); 1151 data->m = NULL; 1152 } 1153 if (data->map != NULL) 1154 bus_dmamap_destroy(ring->data_dmat, data->map); 1155 } 1156 if (ring->data_dmat != NULL) { 1157 bus_dma_tag_destroy(ring->data_dmat); 1158 ring->data_dmat = NULL; 1159 } 1160 } 1161 1162 static int 1163 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) 1164 { 1165 bus_addr_t paddr; 1166 bus_size_t size; 1167 int i, error; 1168 1169 ring->qid = qid; 1170 
ring->queued = 0; 1171 ring->cur = 0; 1172 ring->pending = 0; 1173 ring->update = 0; 1174 1175 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1176 1177 /* Allocate TX descriptors (16KB aligned.) */ 1178 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1179 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1180 size, WPI_RING_DMA_ALIGN); 1181 if (error != 0) { 1182 device_printf(sc->sc_dev, 1183 "%s: could not allocate TX ring DMA memory, error %d\n", 1184 __func__, error); 1185 goto fail; 1186 } 1187 1188 /* Update shared area with ring physical address. */ 1189 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1190 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1191 BUS_DMASYNC_PREWRITE); 1192 1193 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1194 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1195 size, 4); 1196 if (error != 0) { 1197 device_printf(sc->sc_dev, 1198 "%s: could not allocate TX cmd DMA memory, error %d\n", 1199 __func__, error); 1200 goto fail; 1201 } 1202 1203 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1204 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1205 WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); 1206 if (error != 0) { 1207 device_printf(sc->sc_dev, 1208 "%s: could not create TX buf DMA tag, error %d\n", 1209 __func__, error); 1210 goto fail; 1211 } 1212 1213 paddr = ring->cmd_dma.paddr; 1214 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1215 struct wpi_tx_data *data = &ring->data[i]; 1216 1217 data->cmd_paddr = paddr; 1218 paddr += sizeof (struct wpi_tx_cmd); 1219 1220 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1221 if (error != 0) { 1222 device_printf(sc->sc_dev, 1223 "%s: could not create TX buf DMA map, error %d\n", 1224 __func__, error); 1225 goto fail; 1226 } 1227 } 1228 1229 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1230 1231 return 0; 1232 1233 fail: wpi_free_tx_ring(sc, ring); 1234 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1235 return error; 1236 } 1237 1238 static void 1239 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1240 { 1241 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1242 } 1243 1244 static void 1245 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1246 { 1247 1248 if (ring->update != 0) { 1249 /* Wait for INT_WAKEUP event. */ 1250 return; 1251 } 1252 1253 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1254 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1255 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1256 __func__, ring->qid); 1257 ring->update = 1; 1258 } else { 1259 wpi_update_tx_ring(sc, ring); 1260 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1261 } 1262 } 1263 1264 static void 1265 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1266 { 1267 int i; 1268 1269 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1270 1271 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1272 struct wpi_tx_data *data = &ring->data[i]; 1273 1274 if (data->m != NULL) { 1275 bus_dmamap_sync(ring->data_dmat, data->map, 1276 BUS_DMASYNC_POSTWRITE); 1277 bus_dmamap_unload(ring->data_dmat, data->map); 1278 m_freem(data->m); 1279 data->m = NULL; 1280 } 1281 if (data->ni != NULL) { 1282 ieee80211_free_node(data->ni); 1283 data->ni = NULL; 1284 } 1285 } 1286 /* Clear TX descriptors. 
*/ 1287 memset(ring->desc, 0, ring->desc_dma.size); 1288 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1289 BUS_DMASYNC_PREWRITE); 1290 ring->queued = 0; 1291 ring->cur = 0; 1292 ring->pending = 0; 1293 ring->update = 0; 1294 } 1295 1296 static void 1297 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1298 { 1299 int i; 1300 1301 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1302 1303 wpi_dma_contig_free(&ring->desc_dma); 1304 wpi_dma_contig_free(&ring->cmd_dma); 1305 1306 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1307 struct wpi_tx_data *data = &ring->data[i]; 1308 1309 if (data->m != NULL) { 1310 bus_dmamap_sync(ring->data_dmat, data->map, 1311 BUS_DMASYNC_POSTWRITE); 1312 bus_dmamap_unload(ring->data_dmat, data->map); 1313 m_freem(data->m); 1314 } 1315 if (data->map != NULL) 1316 bus_dmamap_destroy(ring->data_dmat, data->map); 1317 } 1318 if (ring->data_dmat != NULL) { 1319 bus_dma_tag_destroy(ring->data_dmat); 1320 ring->data_dmat = NULL; 1321 } 1322 } 1323 1324 /* 1325 * Extract various information from EEPROM. 1326 */ 1327 static int 1328 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1329 { 1330 #define WPI_CHK(res) do { \ 1331 if ((error = res) != 0) \ 1332 goto fail; \ 1333 } while (0) 1334 uint8_t i; 1335 int error; 1336 1337 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1338 1339 /* Adapter has to be powered on for EEPROM access to work. */ 1340 if ((error = wpi_apm_init(sc)) != 0) { 1341 device_printf(sc->sc_dev, 1342 "%s: could not power ON adapter, error %d\n", __func__, 1343 error); 1344 return error; 1345 } 1346 1347 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1348 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1349 error = EIO; 1350 goto fail; 1351 } 1352 /* Clear HW ownership of EEPROM. */ 1353 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1354 1355 /* Read the hardware capabilities, revision and SKU type. */ 1356 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1357 sizeof(sc->cap))); 1358 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1359 sizeof(sc->rev))); 1360 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1361 sizeof(sc->type))); 1362 1363 sc->rev = le16toh(sc->rev); 1364 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1365 sc->rev, sc->type); 1366 1367 /* Read the regulatory domain (4 ASCII characters.) */ 1368 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1369 sizeof(sc->domain))); 1370 1371 /* Read MAC address. */ 1372 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1373 IEEE80211_ADDR_LEN)); 1374 1375 /* Read the list of authorized channels. */ 1376 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1377 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1378 1379 /* Read the list of TX power groups. */ 1380 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1381 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1382 1383 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1384 1385 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1386 __func__); 1387 1388 return error; 1389 #undef WPI_CHK 1390 } 1391 1392 /* 1393 * Translate EEPROM flags to net80211. 
1394 */ 1395 static uint32_t 1396 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1397 { 1398 uint32_t nflags; 1399 1400 nflags = 0; 1401 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1402 nflags |= IEEE80211_CHAN_PASSIVE; 1403 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1404 nflags |= IEEE80211_CHAN_NOADHOC; 1405 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1406 nflags |= IEEE80211_CHAN_DFS; 1407 /* XXX apparently IBSS may still be marked */ 1408 nflags |= IEEE80211_CHAN_NOADHOC; 1409 } 1410 1411 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1412 if (nflags & IEEE80211_CHAN_NOADHOC) 1413 nflags |= IEEE80211_CHAN_NOHOSTAP; 1414 1415 return nflags; 1416 } 1417 1418 static void 1419 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n) 1420 { 1421 struct ieee80211com *ic = &sc->sc_ic; 1422 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1423 const struct wpi_chan_band *band = &wpi_bands[n]; 1424 struct ieee80211_channel *c; 1425 uint32_t nflags; 1426 uint8_t chan, i; 1427 1428 for (i = 0; i < band->nchan; i++) { 1429 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1430 DPRINTF(sc, WPI_DEBUG_EEPROM, 1431 "Channel Not Valid: %d, band %d\n", 1432 band->chan[i],n); 1433 continue; 1434 } 1435 1436 chan = band->chan[i]; 1437 nflags = wpi_eeprom_channel_flags(&channels[i]); 1438 1439 c = &ic->ic_channels[ic->ic_nchans++]; 1440 c->ic_ieee = chan; 1441 c->ic_maxregpower = channels[i].maxpwr; 1442 c->ic_maxpower = 2*c->ic_maxregpower; 1443 1444 if (n == 0) { /* 2GHz band */ 1445 c->ic_freq = ieee80211_ieee2mhz(chan, 1446 IEEE80211_CHAN_G); 1447 1448 /* G =>'s B is supported */ 1449 c->ic_flags = IEEE80211_CHAN_B | nflags; 1450 c = &ic->ic_channels[ic->ic_nchans++]; 1451 c[0] = c[-1]; 1452 c->ic_flags = IEEE80211_CHAN_G | nflags; 1453 } else { /* 5GHz band */ 1454 c->ic_freq = ieee80211_ieee2mhz(chan, 1455 IEEE80211_CHAN_A); 1456 1457 c->ic_flags = IEEE80211_CHAN_A | nflags; 1458 } 1459 1460 /* Save maximum allowed TX power for this channel. */ 1461 sc->maxpwr[chan] = channels[i].maxpwr; 1462 1463 DPRINTF(sc, WPI_DEBUG_EEPROM, 1464 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1465 " offset %d\n", chan, c->ic_freq, 1466 channels[i].flags, sc->maxpwr[chan], 1467 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1468 } 1469 } 1470 1471 /** 1472 * Read the eeprom to find out what channels are valid for the given 1473 * band and update net80211 with what we find. 1474 */ 1475 static int 1476 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) 1477 { 1478 struct ieee80211com *ic = &sc->sc_ic; 1479 const struct wpi_chan_band *band = &wpi_bands[n]; 1480 int error; 1481 1482 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1483 1484 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1485 band->nchan * sizeof (struct wpi_eeprom_chan)); 1486 if (error != 0) { 1487 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1488 return error; 1489 } 1490 1491 wpi_read_eeprom_band(sc, n); 1492 1493 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1494 1495 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1496 1497 return 0; 1498 } 1499 1500 static struct wpi_eeprom_chan * 1501 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1502 { 1503 int i, j; 1504 1505 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1506 for (i = 0; i < wpi_bands[j].nchan; i++) 1507 if (wpi_bands[j].chan[i] == c->ic_ieee) 1508 return &sc->eeprom_channels[j][i]; 1509 1510 return NULL; 1511 } 1512 1513 /* 1514 * Enforce flags read from EEPROM. 
1515 */ 1516 static int 1517 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1518 int nchan, struct ieee80211_channel chans[]) 1519 { 1520 struct wpi_softc *sc = ic->ic_softc; 1521 int i; 1522 1523 for (i = 0; i < nchan; i++) { 1524 struct ieee80211_channel *c = &chans[i]; 1525 struct wpi_eeprom_chan *channel; 1526 1527 channel = wpi_find_eeprom_channel(sc, c); 1528 if (channel == NULL) { 1529 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1530 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1531 return EINVAL; 1532 } 1533 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1534 } 1535 1536 return 0; 1537 } 1538 1539 static int 1540 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) 1541 { 1542 struct wpi_power_group *group = &sc->groups[n]; 1543 struct wpi_eeprom_group rgroup; 1544 int i, error; 1545 1546 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1547 1548 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1549 &rgroup, sizeof rgroup)) != 0) { 1550 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1551 return error; 1552 } 1553 1554 /* Save TX power group information. */ 1555 group->chan = rgroup.chan; 1556 group->maxpwr = rgroup.maxpwr; 1557 /* Retrieve temperature at which the samples were taken. */ 1558 group->temp = (int16_t)le16toh(rgroup.temp); 1559 1560 DPRINTF(sc, WPI_DEBUG_EEPROM, 1561 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1562 group->maxpwr, group->temp); 1563 1564 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1565 group->samples[i].index = rgroup.samples[i].index; 1566 group->samples[i].power = rgroup.samples[i].power; 1567 1568 DPRINTF(sc, WPI_DEBUG_EEPROM, 1569 "\tsample %d: index=%d power=%d\n", i, 1570 group->samples[i].index, group->samples[i].power); 1571 } 1572 1573 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1574 1575 return 0; 1576 } 1577 1578 static __inline uint8_t 1579 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1580 { 1581 uint8_t newid = WPI_ID_IBSS_MIN; 1582 1583 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1584 if ((sc->nodesmsk & (1 << newid)) == 0) { 1585 sc->nodesmsk |= 1 << newid; 1586 return newid; 1587 } 1588 } 1589 1590 return WPI_ID_UNDEFINED; 1591 } 1592 1593 static __inline uint8_t 1594 wpi_add_node_entry_sta(struct wpi_softc *sc) 1595 { 1596 sc->nodesmsk |= 1 << WPI_ID_BSS; 1597 1598 return WPI_ID_BSS; 1599 } 1600 1601 static __inline int 1602 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1603 { 1604 if (id == WPI_ID_UNDEFINED) 1605 return 0; 1606 1607 return (sc->nodesmsk >> id) & 1; 1608 } 1609 1610 static __inline void 1611 wpi_clear_node_table(struct wpi_softc *sc) 1612 { 1613 sc->nodesmsk = 0; 1614 } 1615 1616 static __inline void 1617 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1618 { 1619 sc->nodesmsk &= ~(1 << id); 1620 } 1621 1622 static struct ieee80211_node * 1623 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1624 { 1625 struct wpi_node *wn; 1626 1627 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1628 M_NOWAIT | M_ZERO); 1629 1630 if (wn == NULL) 1631 return NULL; 1632 1633 wn->id = WPI_ID_UNDEFINED; 1634 1635 return &wn->ni; 1636 } 1637 1638 static void 1639 wpi_node_free(struct ieee80211_node *ni) 1640 { 1641 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1642 struct wpi_node *wn = WPI_NODE(ni); 1643 1644 if (wn->id != WPI_ID_UNDEFINED) { 1645 WPI_NT_LOCK(sc); 1646 if (wpi_check_node_entry(sc, wn->id)) { 1647 wpi_del_node_entry(sc, wn->id); 1648 wpi_del_node(sc, ni); 1649 } 
1650 WPI_NT_UNLOCK(sc); 1651 } 1652 1653 sc->sc_node_free(ni); 1654 } 1655 1656 static __inline int 1657 wpi_check_bss_filter(struct wpi_softc *sc) 1658 { 1659 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1660 } 1661 1662 static void 1663 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1664 const struct ieee80211_rx_stats *rxs, 1665 int rssi, int nf) 1666 { 1667 struct ieee80211vap *vap = ni->ni_vap; 1668 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1669 struct wpi_vap *wvp = WPI_VAP(vap); 1670 uint64_t ni_tstamp, rx_tstamp; 1671 1672 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1673 1674 if (vap->iv_state == IEEE80211_S_RUN && 1675 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1676 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1677 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1678 rx_tstamp = le64toh(sc->rx_tstamp); 1679 1680 if (ni_tstamp >= rx_tstamp) { 1681 DPRINTF(sc, WPI_DEBUG_STATE, 1682 "ibss merge, tsf %ju tstamp %ju\n", 1683 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1684 (void) ieee80211_ibss_merge(ni); 1685 } 1686 } 1687 } 1688 1689 static void 1690 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1691 { 1692 struct wpi_softc *sc = arg; 1693 struct wpi_node *wn = WPI_NODE(ni); 1694 int error; 1695 1696 WPI_NT_LOCK(sc); 1697 if (wn->id != WPI_ID_UNDEFINED) { 1698 wn->id = WPI_ID_UNDEFINED; 1699 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1700 device_printf(sc->sc_dev, 1701 "%s: could not add IBSS node, error %d\n", 1702 __func__, error); 1703 } 1704 } 1705 WPI_NT_UNLOCK(sc); 1706 } 1707 1708 static void 1709 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1710 { 1711 struct ieee80211com *ic = &sc->sc_ic; 1712 1713 /* Set group keys once. */ 1714 WPI_NT_LOCK(sc); 1715 wvp->wv_gtk = 0; 1716 WPI_NT_UNLOCK(sc); 1717 1718 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1719 ieee80211_crypto_reload_keys(ic); 1720 } 1721 1722 /** 1723 * Called by net80211 when ever there is a change to 80211 state machine 1724 */ 1725 static int 1726 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1727 { 1728 struct wpi_vap *wvp = WPI_VAP(vap); 1729 struct ieee80211com *ic = vap->iv_ic; 1730 struct wpi_softc *sc = ic->ic_softc; 1731 int error = 0; 1732 1733 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1734 1735 WPI_TXQ_LOCK(sc); 1736 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1737 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1738 WPI_TXQ_UNLOCK(sc); 1739 1740 return ENXIO; 1741 } 1742 WPI_TXQ_UNLOCK(sc); 1743 1744 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1745 ieee80211_state_name[vap->iv_state], 1746 ieee80211_state_name[nstate]); 1747 1748 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1749 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1750 device_printf(sc->sc_dev, 1751 "%s: could not set power saving level\n", 1752 __func__); 1753 return error; 1754 } 1755 1756 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1757 } 1758 1759 switch (nstate) { 1760 case IEEE80211_S_SCAN: 1761 WPI_RXON_LOCK(sc); 1762 if (wpi_check_bss_filter(sc) != 0) { 1763 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1764 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1765 device_printf(sc->sc_dev, 1766 "%s: could not send RXON\n", __func__); 1767 } 1768 } 1769 WPI_RXON_UNLOCK(sc); 1770 break; 1771 1772 case IEEE80211_S_ASSOC: 1773 if (vap->iv_state != IEEE80211_S_RUN) 1774 break; 1775 /* FALLTHROUGH */ 1776 case IEEE80211_S_AUTH: 1777 /* 1778 * NB: do not 
optimize AUTH -> AUTH state transmission - 1779 * this will break powersave with non-QoS AP! 1780 */ 1781 1782 /* 1783 * The node must be registered in the firmware before auth. 1784 * Also the associd must be cleared on RUN -> ASSOC 1785 * transitions. 1786 */ 1787 if ((error = wpi_auth(sc, vap)) != 0) { 1788 device_printf(sc->sc_dev, 1789 "%s: could not move to AUTH state, error %d\n", 1790 __func__, error); 1791 } 1792 break; 1793 1794 case IEEE80211_S_RUN: 1795 /* 1796 * RUN -> RUN transition: 1797 * STA mode: Just restart the timers. 1798 * IBSS mode: Process IBSS merge. 1799 */ 1800 if (vap->iv_state == IEEE80211_S_RUN) { 1801 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1802 WPI_RXON_LOCK(sc); 1803 wpi_calib_timeout(sc); 1804 WPI_RXON_UNLOCK(sc); 1805 break; 1806 } else { 1807 /* 1808 * Drop the BSS_FILTER bit 1809 * (there is no another way to change bssid). 1810 */ 1811 WPI_RXON_LOCK(sc); 1812 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1813 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1814 device_printf(sc->sc_dev, 1815 "%s: could not send RXON\n", 1816 __func__); 1817 } 1818 WPI_RXON_UNLOCK(sc); 1819 1820 /* Restore all what was lost. */ 1821 wpi_restore_node_table(sc, wvp); 1822 1823 /* XXX set conditionally? */ 1824 wpi_updateedca(ic); 1825 } 1826 } 1827 1828 /* 1829 * !RUN -> RUN requires setting the association id 1830 * which is done with a firmware cmd. We also defer 1831 * starting the timers until that work is done. 1832 */ 1833 if ((error = wpi_run(sc, vap)) != 0) { 1834 device_printf(sc->sc_dev, 1835 "%s: could not move to RUN state\n", __func__); 1836 } 1837 break; 1838 1839 default: 1840 break; 1841 } 1842 if (error != 0) { 1843 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1844 return error; 1845 } 1846 1847 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1848 1849 return wvp->wv_newstate(vap, nstate, arg); 1850 } 1851 1852 static void 1853 wpi_calib_timeout(void *arg) 1854 { 1855 struct wpi_softc *sc = arg; 1856 1857 if (wpi_check_bss_filter(sc) == 0) 1858 return; 1859 1860 wpi_power_calibration(sc); 1861 1862 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1863 } 1864 1865 static __inline uint8_t 1866 rate2plcp(const uint8_t rate) 1867 { 1868 switch (rate) { 1869 case 12: return 0xd; 1870 case 18: return 0xf; 1871 case 24: return 0x5; 1872 case 36: return 0x7; 1873 case 48: return 0x9; 1874 case 72: return 0xb; 1875 case 96: return 0x1; 1876 case 108: return 0x3; 1877 case 2: return 10; 1878 case 4: return 20; 1879 case 11: return 55; 1880 case 22: return 110; 1881 default: return 0; 1882 } 1883 } 1884 1885 static __inline uint8_t 1886 plcp2rate(const uint8_t plcp) 1887 { 1888 switch (plcp) { 1889 case 0xd: return 12; 1890 case 0xf: return 18; 1891 case 0x5: return 24; 1892 case 0x7: return 36; 1893 case 0x9: return 48; 1894 case 0xb: return 72; 1895 case 0x1: return 96; 1896 case 0x3: return 108; 1897 case 10: return 2; 1898 case 20: return 4; 1899 case 55: return 11; 1900 case 110: return 22; 1901 default: return 0; 1902 } 1903 } 1904 1905 /* Quickly determine if a given rate is CCK or OFDM. 
*/ 1906 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1907 1908 static void 1909 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1910 struct wpi_rx_data *data) 1911 { 1912 struct ieee80211com *ic = &sc->sc_ic; 1913 struct wpi_rx_ring *ring = &sc->rxq; 1914 struct wpi_rx_stat *stat; 1915 struct wpi_rx_head *head; 1916 struct wpi_rx_tail *tail; 1917 struct ieee80211_frame *wh; 1918 struct ieee80211_node *ni; 1919 struct mbuf *m, *m1; 1920 bus_addr_t paddr; 1921 uint32_t flags; 1922 uint16_t len; 1923 int error; 1924 1925 stat = (struct wpi_rx_stat *)(desc + 1); 1926 1927 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1928 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1929 goto fail1; 1930 } 1931 1932 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1933 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1934 len = le16toh(head->len); 1935 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1936 flags = le32toh(tail->flags); 1937 1938 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1939 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1940 le32toh(desc->len), len, (int8_t)stat->rssi, 1941 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1942 1943 /* Discard frames with a bad FCS early. */ 1944 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1945 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1946 __func__, flags); 1947 goto fail1; 1948 } 1949 /* Discard frames that are too short. */ 1950 if (len < sizeof (struct ieee80211_frame_ack)) { 1951 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1952 __func__, len); 1953 goto fail1; 1954 } 1955 1956 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1957 if (__predict_false(m1 == NULL)) { 1958 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1959 __func__); 1960 goto fail1; 1961 } 1962 bus_dmamap_unload(ring->data_dmat, data->map); 1963 1964 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1965 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1966 if (__predict_false(error != 0 && error != EFBIG)) { 1967 device_printf(sc->sc_dev, 1968 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1969 m_freem(m1); 1970 1971 /* Try to reload the old mbuf. */ 1972 error = bus_dmamap_load(ring->data_dmat, data->map, 1973 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1974 &paddr, BUS_DMA_NOWAIT); 1975 if (error != 0 && error != EFBIG) { 1976 panic("%s: could not load old RX mbuf", __func__); 1977 } 1978 /* Physical address may have changed. */ 1979 ring->desc[ring->cur] = htole32(paddr); 1980 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1981 BUS_DMASYNC_PREWRITE); 1982 goto fail1; 1983 } 1984 1985 m = data->m; 1986 data->m = m1; 1987 /* Update RX descriptor. */ 1988 ring->desc[ring->cur] = htole32(paddr); 1989 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1990 BUS_DMASYNC_PREWRITE); 1991 1992 /* Finalize mbuf. */ 1993 m->m_data = (caddr_t)(head + 1); 1994 m->m_pkthdr.len = m->m_len = len; 1995 1996 /* Grab a reference to the source node. */ 1997 wh = mtod(m, struct ieee80211_frame *); 1998 1999 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2000 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2001 /* Check whether decryption was successful or not. 
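 * The device reports the CCMP decryption result in the RX tail flags;
 * frames that fail the check are dropped and counted as input errors
 * (fail2 below).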
*/ 2002 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2003 DPRINTF(sc, WPI_DEBUG_RECV, 2004 "CCMP decryption failed 0x%x\n", flags); 2005 goto fail2; 2006 } 2007 m->m_flags |= M_WEP; 2008 } 2009 2010 if (len >= sizeof(struct ieee80211_frame_min)) 2011 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2012 else 2013 ni = NULL; 2014 2015 sc->rx_tstamp = tail->tstamp; 2016 2017 if (ieee80211_radiotap_active(ic)) { 2018 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2019 2020 tap->wr_flags = 0; 2021 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2022 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2023 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2024 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2025 tap->wr_tsft = tail->tstamp; 2026 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2027 tap->wr_rate = plcp2rate(head->plcp); 2028 } 2029 2030 WPI_UNLOCK(sc); 2031 2032 /* Send the frame to the 802.11 layer. */ 2033 if (ni != NULL) { 2034 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2035 /* Node is no longer needed. */ 2036 ieee80211_free_node(ni); 2037 } else 2038 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2039 2040 WPI_LOCK(sc); 2041 2042 return; 2043 2044 fail2: m_freem(m); 2045 2046 fail1: counter_u64_add(ic->ic_ierrors, 1); 2047 } 2048 2049 static void 2050 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2051 struct wpi_rx_data *data) 2052 { 2053 /* Ignore */ 2054 } 2055 2056 static void 2057 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2058 { 2059 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2060 struct wpi_tx_data *data = &ring->data[desc->idx]; 2061 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2062 struct mbuf *m; 2063 struct ieee80211_node *ni; 2064 struct ieee80211vap *vap; 2065 struct ieee80211com *ic; 2066 uint32_t status = le32toh(stat->status); 2067 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2068 2069 KASSERT(data->ni != NULL, ("no node")); 2070 KASSERT(data->m != NULL, ("no mbuf")); 2071 2072 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2073 2074 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2075 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2076 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2077 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2078 2079 /* Unmap and free mbuf. */ 2080 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2081 bus_dmamap_unload(ring->data_dmat, data->map); 2082 m = data->m, data->m = NULL; 2083 ni = data->ni, data->ni = NULL; 2084 vap = ni->ni_vap; 2085 ic = vap->iv_ic; 2086 2087 /* 2088 * Update rate control statistics for the node. 2089 */ 2090 if (status & WPI_TX_STATUS_FAIL) { 2091 ieee80211_ratectl_tx_complete(vap, ni, 2092 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2093 } else 2094 ieee80211_ratectl_tx_complete(vap, ni, 2095 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2096 2097 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2098 2099 WPI_TXQ_STATE_LOCK(sc); 2100 if (--ring->queued > 0) 2101 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2102 else 2103 callout_stop(&sc->tx_timeout); 2104 WPI_TXQ_STATE_UNLOCK(sc); 2105 2106 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2107 } 2108 2109 /* 2110 * Process a "command done" firmware notification. This is where we wakeup 2111 * processes waiting for a synchronous command completion. 
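 * The waiting thread sleeps on the command slot in wpi_cmd() (mtx_sleep)
 * and is woken by the wakeup(cmd) call below once the firmware has
 * acknowledged the command.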
2112 */ 2113 static void 2114 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2115 { 2116 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2117 struct wpi_tx_data *data; 2118 struct wpi_tx_cmd *cmd; 2119 2120 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2121 "type %s len %d\n", desc->qid, desc->idx, 2122 desc->flags, wpi_cmd_str(desc->type), 2123 le32toh(desc->len)); 2124 2125 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2126 return; /* Not a command ack. */ 2127 2128 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2129 2130 data = &ring->data[desc->idx]; 2131 cmd = &ring->cmd[desc->idx]; 2132 2133 /* If the command was mapped in an mbuf, free it. */ 2134 if (data->m != NULL) { 2135 bus_dmamap_sync(ring->data_dmat, data->map, 2136 BUS_DMASYNC_POSTWRITE); 2137 bus_dmamap_unload(ring->data_dmat, data->map); 2138 m_freem(data->m); 2139 data->m = NULL; 2140 } 2141 2142 wakeup(cmd); 2143 2144 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2145 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2146 2147 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2148 BUS_DMASYNC_POSTREAD); 2149 2150 WPI_TXQ_LOCK(sc); 2151 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2152 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2153 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2154 } else { 2155 sc->sc_update_rx_ring = wpi_update_rx_ring; 2156 sc->sc_update_tx_ring = wpi_update_tx_ring; 2157 } 2158 WPI_TXQ_UNLOCK(sc); 2159 } 2160 } 2161 2162 static void 2163 wpi_notif_intr(struct wpi_softc *sc) 2164 { 2165 struct ieee80211com *ic = &sc->sc_ic; 2166 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2167 uint32_t hw; 2168 2169 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2170 BUS_DMASYNC_POSTREAD); 2171 2172 hw = le32toh(sc->shared->next) & 0xfff; 2173 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2174 2175 while (sc->rxq.cur != hw) { 2176 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2177 2178 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2179 struct wpi_rx_desc *desc; 2180 2181 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2182 BUS_DMASYNC_POSTREAD); 2183 desc = mtod(data->m, struct wpi_rx_desc *); 2184 2185 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2186 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2187 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2188 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2189 2190 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2191 /* Reply to a command. */ 2192 wpi_cmd_done(sc, desc); 2193 } 2194 2195 switch (desc->type) { 2196 case WPI_RX_DONE: 2197 /* An 802.11 frame has been received. */ 2198 wpi_rx_done(sc, desc, data); 2199 2200 if (__predict_false(sc->sc_running == 0)) { 2201 /* wpi_stop() was called. */ 2202 return; 2203 } 2204 2205 break; 2206 2207 case WPI_TX_DONE: 2208 /* An 802.11 frame has been transmitted. 
*/ 2209 wpi_tx_done(sc, desc); 2210 break; 2211 2212 case WPI_RX_STATISTICS: 2213 case WPI_BEACON_STATISTICS: 2214 wpi_rx_statistics(sc, desc, data); 2215 break; 2216 2217 case WPI_BEACON_MISSED: 2218 { 2219 struct wpi_beacon_missed *miss = 2220 (struct wpi_beacon_missed *)(desc + 1); 2221 uint32_t expected, misses, received, threshold; 2222 2223 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2224 BUS_DMASYNC_POSTREAD); 2225 2226 misses = le32toh(miss->consecutive); 2227 expected = le32toh(miss->expected); 2228 received = le32toh(miss->received); 2229 threshold = MAX(2, vap->iv_bmissthreshold); 2230 2231 DPRINTF(sc, WPI_DEBUG_BMISS, 2232 "%s: beacons missed %u(%u) (received %u/%u)\n", 2233 __func__, misses, le32toh(miss->total), received, 2234 expected); 2235 2236 if (misses >= threshold || 2237 (received == 0 && expected >= threshold)) { 2238 WPI_RXON_LOCK(sc); 2239 if (callout_pending(&sc->scan_timeout)) { 2240 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2241 0, 1); 2242 } 2243 WPI_RXON_UNLOCK(sc); 2244 if (vap->iv_state == IEEE80211_S_RUN && 2245 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2246 ieee80211_beacon_miss(ic); 2247 } 2248 2249 break; 2250 } 2251 #ifdef WPI_DEBUG 2252 case WPI_BEACON_SENT: 2253 { 2254 struct wpi_tx_stat *stat = 2255 (struct wpi_tx_stat *)(desc + 1); 2256 uint64_t *tsf = (uint64_t *)(stat + 1); 2257 uint32_t *mode = (uint32_t *)(tsf + 1); 2258 2259 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2260 BUS_DMASYNC_POSTREAD); 2261 2262 DPRINTF(sc, WPI_DEBUG_BEACON, 2263 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2264 "duration %u, status %x, tsf %ju, mode %x\n", 2265 stat->rtsfailcnt, stat->ackfailcnt, 2266 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2267 le32toh(stat->status), le64toh(*tsf), 2268 le32toh(*mode)); 2269 2270 break; 2271 } 2272 #endif 2273 case WPI_UC_READY: 2274 { 2275 struct wpi_ucode_info *uc = 2276 (struct wpi_ucode_info *)(desc + 1); 2277 2278 /* The microcontroller is ready. */ 2279 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2280 BUS_DMASYNC_POSTREAD); 2281 DPRINTF(sc, WPI_DEBUG_RESET, 2282 "microcode alive notification version=%d.%d " 2283 "subtype=%x alive=%x\n", uc->major, uc->minor, 2284 uc->subtype, le32toh(uc->valid)); 2285 2286 if (le32toh(uc->valid) != 1) { 2287 device_printf(sc->sc_dev, 2288 "microcontroller initialization failed\n"); 2289 wpi_stop_locked(sc); 2290 return; 2291 } 2292 /* Save the address of the error log in SRAM. 
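 * wpi_fatal_intr() uses this pointer to dump the firmware error log when
 * a SW/HW error interrupt is raised later on.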
*/ 2293 sc->errptr = le32toh(uc->errptr); 2294 break; 2295 } 2296 case WPI_STATE_CHANGED: 2297 { 2298 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2299 BUS_DMASYNC_POSTREAD); 2300 2301 uint32_t *status = (uint32_t *)(desc + 1); 2302 2303 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2304 le32toh(*status)); 2305 2306 if (le32toh(*status) & 1) { 2307 WPI_NT_LOCK(sc); 2308 wpi_clear_node_table(sc); 2309 WPI_NT_UNLOCK(sc); 2310 taskqueue_enqueue(sc->sc_tq, 2311 &sc->sc_radiooff_task); 2312 return; 2313 } 2314 break; 2315 } 2316 #ifdef WPI_DEBUG 2317 case WPI_START_SCAN: 2318 { 2319 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2320 BUS_DMASYNC_POSTREAD); 2321 2322 struct wpi_start_scan *scan = 2323 (struct wpi_start_scan *)(desc + 1); 2324 DPRINTF(sc, WPI_DEBUG_SCAN, 2325 "%s: scanning channel %d status %x\n", 2326 __func__, scan->chan, le32toh(scan->status)); 2327 2328 break; 2329 } 2330 #endif 2331 case WPI_STOP_SCAN: 2332 { 2333 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2334 BUS_DMASYNC_POSTREAD); 2335 2336 struct wpi_stop_scan *scan = 2337 (struct wpi_stop_scan *)(desc + 1); 2338 2339 DPRINTF(sc, WPI_DEBUG_SCAN, 2340 "scan finished nchan=%d status=%d chan=%d\n", 2341 scan->nchan, scan->status, scan->chan); 2342 2343 WPI_RXON_LOCK(sc); 2344 callout_stop(&sc->scan_timeout); 2345 WPI_RXON_UNLOCK(sc); 2346 if (scan->status == WPI_SCAN_ABORTED) 2347 ieee80211_cancel_scan(vap); 2348 else 2349 ieee80211_scan_next(vap); 2350 break; 2351 } 2352 } 2353 2354 if (sc->rxq.cur % 8 == 0) { 2355 /* Tell the firmware what we have processed. */ 2356 sc->sc_update_rx_ring(sc); 2357 } 2358 } 2359 } 2360 2361 /* 2362 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2363 * from power-down sleep mode. 2364 */ 2365 static void 2366 wpi_wakeup_intr(struct wpi_softc *sc) 2367 { 2368 int qid; 2369 2370 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2371 "%s: ucode wakeup from power-down sleep\n", __func__); 2372 2373 /* Wakeup RX and TX rings. 
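 * Ring index updates deferred while the microcontroller was asleep
 * (ring->update set) are flushed here before the MAC access request bit
 * is released.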
*/ 2374 if (sc->rxq.update) { 2375 sc->rxq.update = 0; 2376 wpi_update_rx_ring(sc); 2377 } 2378 WPI_TXQ_LOCK(sc); 2379 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2380 struct wpi_tx_ring *ring = &sc->txq[qid]; 2381 2382 if (ring->update) { 2383 ring->update = 0; 2384 wpi_update_tx_ring(sc, ring); 2385 } 2386 } 2387 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2388 WPI_TXQ_UNLOCK(sc); 2389 } 2390 2391 /* 2392 * This function prints firmware registers 2393 */ 2394 #ifdef WPI_DEBUG 2395 static void 2396 wpi_debug_registers(struct wpi_softc *sc) 2397 { 2398 size_t i; 2399 static const uint32_t csr_tbl[] = { 2400 WPI_HW_IF_CONFIG, 2401 WPI_INT, 2402 WPI_INT_MASK, 2403 WPI_FH_INT, 2404 WPI_GPIO_IN, 2405 WPI_RESET, 2406 WPI_GP_CNTRL, 2407 WPI_EEPROM, 2408 WPI_EEPROM_GP, 2409 WPI_GIO, 2410 WPI_UCODE_GP1, 2411 WPI_UCODE_GP2, 2412 WPI_GIO_CHICKEN, 2413 WPI_ANA_PLL, 2414 WPI_DBG_HPET_MEM, 2415 }; 2416 static const uint32_t prph_tbl[] = { 2417 WPI_APMG_CLK_CTRL, 2418 WPI_APMG_PS, 2419 WPI_APMG_PCI_STT, 2420 WPI_APMG_RFKILL, 2421 }; 2422 2423 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2424 2425 for (i = 0; i < nitems(csr_tbl); i++) { 2426 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2427 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2428 2429 if ((i + 1) % 2 == 0) 2430 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2431 } 2432 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2433 2434 if (wpi_nic_lock(sc) == 0) { 2435 for (i = 0; i < nitems(prph_tbl); i++) { 2436 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2437 wpi_get_prph_string(prph_tbl[i]), 2438 wpi_prph_read(sc, prph_tbl[i])); 2439 2440 if ((i + 1) % 2 == 0) 2441 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2442 } 2443 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2444 wpi_nic_unlock(sc); 2445 } else { 2446 DPRINTF(sc, WPI_DEBUG_REGISTER, 2447 "Cannot access internal registers.\n"); 2448 } 2449 } 2450 #endif 2451 2452 /* 2453 * Dump the error log of the firmware when a firmware panic occurs. Although 2454 * we can't debug the firmware because it is neither open source nor free, it 2455 * can help us to identify certain classes of problems. 2456 */ 2457 static void 2458 wpi_fatal_intr(struct wpi_softc *sc) 2459 { 2460 struct wpi_fw_dump dump; 2461 uint32_t i, offset, count; 2462 2463 /* Check that the error log address is valid. */ 2464 if (sc->errptr < WPI_FW_DATA_BASE || 2465 sc->errptr + sizeof (dump) > 2466 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2467 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2468 sc->errptr); 2469 return; 2470 } 2471 if (wpi_nic_lock(sc) != 0) { 2472 printf("%s: could not read firmware error log\n", __func__); 2473 return; 2474 } 2475 /* Read number of entries in the log. */ 2476 count = wpi_mem_read(sc, sc->errptr); 2477 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2478 printf("%s: invalid count field (count = %u)\n", __func__, 2479 count); 2480 wpi_nic_unlock(sc); 2481 return; 2482 } 2483 /* Skip "count" field. */ 2484 offset = sc->errptr + sizeof (uint32_t); 2485 printf("firmware error log (count = %u):\n", count); 2486 for (i = 0; i < count; i++) { 2487 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2488 sizeof (dump) / sizeof (uint32_t)); 2489 2490 printf(" error type = \"%s\" (0x%08X)\n", 2491 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2492 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2493 dump.desc); 2494 printf(" error data = 0x%08X\n", 2495 dump.data); 2496 printf(" branch link = 0x%08X%08X\n", 2497 dump.blink[0], dump.blink[1]); 2498 printf(" interrupt link = 0x%08X%08X\n", 2499 dump.ilink[0], dump.ilink[1]); 2500 printf(" time = %u\n", dump.time); 2501 2502 offset += sizeof (dump); 2503 } 2504 wpi_nic_unlock(sc); 2505 /* Dump driver status (TX and RX rings) while we're here. */ 2506 printf("driver status:\n"); 2507 WPI_TXQ_LOCK(sc); 2508 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2509 struct wpi_tx_ring *ring = &sc->txq[i]; 2510 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2511 i, ring->qid, ring->cur, ring->queued); 2512 } 2513 WPI_TXQ_UNLOCK(sc); 2514 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2515 } 2516 2517 static void 2518 wpi_intr(void *arg) 2519 { 2520 struct wpi_softc *sc = arg; 2521 uint32_t r1, r2; 2522 2523 WPI_LOCK(sc); 2524 2525 /* Disable interrupts. */ 2526 WPI_WRITE(sc, WPI_INT_MASK, 0); 2527 2528 r1 = WPI_READ(sc, WPI_INT); 2529 2530 if (__predict_false(r1 == 0xffffffff || 2531 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2532 goto end; /* Hardware gone! */ 2533 2534 r2 = WPI_READ(sc, WPI_FH_INT); 2535 2536 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2537 r1, r2); 2538 2539 if (r1 == 0 && r2 == 0) 2540 goto done; /* Interrupt not for us. */ 2541 2542 /* Acknowledge interrupts. */ 2543 WPI_WRITE(sc, WPI_INT, r1); 2544 WPI_WRITE(sc, WPI_FH_INT, r2); 2545 2546 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2547 device_printf(sc->sc_dev, "fatal firmware error\n"); 2548 #ifdef WPI_DEBUG 2549 wpi_debug_registers(sc); 2550 #endif 2551 wpi_fatal_intr(sc); 2552 DPRINTF(sc, WPI_DEBUG_HW, 2553 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2554 "(Hardware Error)"); 2555 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2556 goto end; 2557 } 2558 2559 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2560 (r2 & WPI_FH_INT_RX)) 2561 wpi_notif_intr(sc); 2562 2563 if (r1 & WPI_INT_ALIVE) 2564 wakeup(sc); /* Firmware is alive. */ 2565 2566 if (r1 & WPI_INT_WAKEUP) 2567 wpi_wakeup_intr(sc); 2568 2569 done: 2570 /* Re-enable interrupts. 
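 * Only while the device is still marked running; the fatal error path
 * above jumps straight to "end" and leaves interrupts masked for the
 * reinit task.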
*/ 2571 if (__predict_true(sc->sc_running)) 2572 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2573 2574 end: WPI_UNLOCK(sc); 2575 } 2576 2577 static void 2578 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) 2579 { 2580 struct wpi_tx_ring *ring; 2581 struct wpi_tx_data *data; 2582 uint8_t cur; 2583 2584 WPI_TXQ_LOCK(sc); 2585 ring = &sc->txq[ac]; 2586 2587 while (ring->pending != 0) { 2588 ring->pending--; 2589 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2590 data = &ring->data[cur]; 2591 2592 bus_dmamap_sync(ring->data_dmat, data->map, 2593 BUS_DMASYNC_POSTWRITE); 2594 bus_dmamap_unload(ring->data_dmat, data->map); 2595 m_freem(data->m); 2596 data->m = NULL; 2597 2598 ieee80211_node_decref(data->ni); 2599 data->ni = NULL; 2600 } 2601 2602 WPI_TXQ_UNLOCK(sc); 2603 } 2604 2605 static int 2606 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2607 { 2608 struct ieee80211_frame *wh; 2609 struct wpi_tx_cmd *cmd; 2610 struct wpi_tx_data *data; 2611 struct wpi_tx_desc *desc; 2612 struct wpi_tx_ring *ring; 2613 struct mbuf *m1; 2614 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2615 uint8_t cur, pad; 2616 uint16_t hdrlen; 2617 int error, i, nsegs, totlen, frag; 2618 2619 WPI_TXQ_LOCK(sc); 2620 2621 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2622 2623 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2624 2625 if (__predict_false(sc->sc_running == 0)) { 2626 /* wpi_stop() was called */ 2627 error = ENETDOWN; 2628 goto end; 2629 } 2630 2631 wh = mtod(buf->m, struct ieee80211_frame *); 2632 hdrlen = ieee80211_anyhdrsize(wh); 2633 totlen = buf->m->m_pkthdr.len; 2634 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); 2635 2636 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2637 error = EINVAL; 2638 goto end; 2639 } 2640 2641 if (hdrlen & 3) { 2642 /* First segment length must be a multiple of 4. */ 2643 pad = 4 - (hdrlen & 3); 2644 } else 2645 pad = 0; 2646 2647 ring = &sc->txq[buf->ac]; 2648 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2649 desc = &ring->desc[cur]; 2650 data = &ring->data[cur]; 2651 2652 /* Prepare TX firmware command. */ 2653 cmd = &ring->cmd[cur]; 2654 cmd->code = buf->code; 2655 cmd->flags = 0; 2656 cmd->qid = ring->qid; 2657 cmd->idx = cur; 2658 2659 memcpy(cmd->data, buf->data, buf->size); 2660 2661 /* Save and trim IEEE802.11 header. */ 2662 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2663 m_adj(buf->m, hdrlen); 2664 2665 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2666 segs, &nsegs, BUS_DMA_NOWAIT); 2667 if (error != 0 && error != EFBIG) { 2668 device_printf(sc->sc_dev, 2669 "%s: can't map mbuf (error %d)\n", __func__, error); 2670 goto end; 2671 } 2672 if (error != 0) { 2673 /* Too many DMA segments, linearize mbuf. */ 2674 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2675 if (m1 == NULL) { 2676 device_printf(sc->sc_dev, 2677 "%s: could not defrag mbuf\n", __func__); 2678 error = ENOBUFS; 2679 goto end; 2680 } 2681 buf->m = m1; 2682 2683 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2684 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2685 if (__predict_false(error != 0)) { 2686 /* XXX fix this (applicable to the iwn(4) too) */ 2687 /* 2688 * NB: Do not return error; 2689 * original mbuf does not exist anymore. 
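 * The frame is freed and charged to the interface as an output error
 * below, so report success to the caller.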
2690 */ 2691 device_printf(sc->sc_dev, 2692 "%s: can't map mbuf (error %d)\n", __func__, 2693 error); 2694 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2695 if_inc_counter(buf->ni->ni_vap->iv_ifp, 2696 IFCOUNTER_OERRORS, 1); 2697 if (!frag) 2698 ieee80211_free_node(buf->ni); 2699 } 2700 m_freem(buf->m); 2701 error = 0; 2702 goto end; 2703 } 2704 } 2705 2706 KASSERT(nsegs < WPI_MAX_SCATTER, 2707 ("too many DMA segments, nsegs (%d) should be less than %d", 2708 nsegs, WPI_MAX_SCATTER)); 2709 2710 data->m = buf->m; 2711 data->ni = buf->ni; 2712 2713 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2714 __func__, ring->qid, cur, totlen, nsegs); 2715 2716 /* Fill TX descriptor. */ 2717 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2718 /* First DMA segment is used by the TX command. */ 2719 desc->segs[0].addr = htole32(data->cmd_paddr); 2720 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2721 /* Other DMA segments are for data payload. */ 2722 seg = &segs[0]; 2723 for (i = 1; i <= nsegs; i++) { 2724 desc->segs[i].addr = htole32(seg->ds_addr); 2725 desc->segs[i].len = htole32(seg->ds_len); 2726 seg++; 2727 } 2728 2729 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2730 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2731 BUS_DMASYNC_PREWRITE); 2732 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2733 BUS_DMASYNC_PREWRITE); 2734 2735 ring->pending += 1; 2736 2737 if (!frag) { 2738 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2739 WPI_TXQ_STATE_LOCK(sc); 2740 ring->queued += ring->pending; 2741 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, 2742 sc); 2743 WPI_TXQ_STATE_UNLOCK(sc); 2744 } 2745 2746 /* Kick TX ring. */ 2747 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2748 ring->pending = 0; 2749 sc->sc_update_tx_ring(sc, ring); 2750 } else 2751 ieee80211_node_incref(data->ni); 2752 2753 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 2754 __func__); 2755 2756 WPI_TXQ_UNLOCK(sc); 2757 2758 return (error); 2759 } 2760 2761 /* 2762 * Construct the data packet for a transmit buffer. 2763 */ 2764 static int 2765 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2766 { 2767 const struct ieee80211_txparam *tp; 2768 struct ieee80211vap *vap = ni->ni_vap; 2769 struct ieee80211com *ic = ni->ni_ic; 2770 struct wpi_node *wn = WPI_NODE(ni); 2771 struct ieee80211_channel *chan; 2772 struct ieee80211_frame *wh; 2773 struct ieee80211_key *k = NULL; 2774 struct wpi_buf tx_data; 2775 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2776 uint32_t flags; 2777 uint16_t ac, qos; 2778 uint8_t tid, type, rate; 2779 int swcrypt, ismcast, totlen; 2780 2781 wh = mtod(m, struct ieee80211_frame *); 2782 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2783 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2784 swcrypt = 1; 2785 2786 /* Select EDCA Access Category and TX ring for this frame. */ 2787 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2788 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2789 tid = qos & IEEE80211_QOS_TID; 2790 } else { 2791 qos = 0; 2792 tid = 0; 2793 } 2794 ac = M_WME_GETAC(m); 2795 2796 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2797 ni->ni_chan : ic->ic_curchan; 2798 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2799 2800 /* Choose a TX rate index. 
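 * Selection order: management frames use the fixed management rate,
 * multicast frames the multicast rate, then a user-fixed unicast rate,
 * then the management rate for EAPOL frames; everything else asks the
 * rate control module.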
*/ 2801 if (type == IEEE80211_FC0_TYPE_MGT) 2802 rate = tp->mgmtrate; 2803 else if (ismcast) 2804 rate = tp->mcastrate; 2805 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2806 rate = tp->ucastrate; 2807 else if (m->m_flags & M_EAPOL) 2808 rate = tp->mgmtrate; 2809 else { 2810 /* XXX pass pktlen */ 2811 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2812 rate = ni->ni_txrate; 2813 } 2814 2815 /* Encrypt the frame if need be. */ 2816 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2817 /* Retrieve key for TX. */ 2818 k = ieee80211_crypto_encap(ni, m); 2819 if (k == NULL) 2820 return (ENOBUFS); 2821 2822 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2823 2824 /* 802.11 header may have moved. */ 2825 wh = mtod(m, struct ieee80211_frame *); 2826 } 2827 totlen = m->m_pkthdr.len; 2828 2829 if (ieee80211_radiotap_active_vap(vap)) { 2830 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2831 2832 tap->wt_flags = 0; 2833 tap->wt_rate = rate; 2834 if (k != NULL) 2835 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2836 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2837 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2838 2839 ieee80211_radiotap_tx(vap, m); 2840 } 2841 2842 flags = 0; 2843 if (!ismcast) { 2844 /* Unicast frame, check if an ACK is expected. */ 2845 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2846 IEEE80211_QOS_ACKPOLICY_NOACK) 2847 flags |= WPI_TX_NEED_ACK; 2848 } 2849 2850 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2851 flags |= WPI_TX_AUTO_SEQ; 2852 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2853 flags |= WPI_TX_MORE_FRAG; 2854 2855 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2856 if (!ismcast) { 2857 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2858 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2859 flags |= WPI_TX_NEED_RTS; 2860 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2861 WPI_RATE_IS_OFDM(rate)) { 2862 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2863 flags |= WPI_TX_NEED_CTS; 2864 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2865 flags |= WPI_TX_NEED_RTS; 2866 } 2867 2868 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2869 flags |= WPI_TX_FULL_TXOP; 2870 } 2871 2872 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2873 if (type == IEEE80211_FC0_TYPE_MGT) { 2874 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2875 2876 /* Tell HW to set timestamp in probe responses. 
*/ 2877 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2878 flags |= WPI_TX_INSERT_TSTAMP; 2879 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2880 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2881 tx->timeout = htole16(3); 2882 else 2883 tx->timeout = htole16(2); 2884 } 2885 2886 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2887 tx->id = WPI_ID_BROADCAST; 2888 else { 2889 if (wn->id == WPI_ID_UNDEFINED) { 2890 device_printf(sc->sc_dev, 2891 "%s: undefined node id\n", __func__); 2892 return (EINVAL); 2893 } 2894 2895 tx->id = wn->id; 2896 } 2897 2898 if (!swcrypt) { 2899 switch (k->wk_cipher->ic_cipher) { 2900 case IEEE80211_CIPHER_AES_CCM: 2901 tx->security = WPI_CIPHER_CCMP; 2902 break; 2903 2904 default: 2905 break; 2906 } 2907 2908 memcpy(tx->key, k->wk_key, k->wk_keylen); 2909 } 2910 2911 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 2912 struct mbuf *next = m->m_nextpkt; 2913 2914 tx->lnext = htole16(next->m_pkthdr.len); 2915 tx->fnext = htole32(tx->security | 2916 (flags & WPI_TX_NEED_ACK) | 2917 WPI_NEXT_STA_ID(tx->id)); 2918 } 2919 2920 tx->len = htole16(totlen); 2921 tx->flags = htole32(flags); 2922 tx->plcp = rate2plcp(rate); 2923 tx->tid = tid; 2924 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2925 tx->ofdm_mask = 0xff; 2926 tx->cck_mask = 0x0f; 2927 tx->rts_ntries = 7; 2928 tx->data_ntries = tp->maxretry; 2929 2930 tx_data.ni = ni; 2931 tx_data.m = m; 2932 tx_data.size = sizeof(struct wpi_cmd_data); 2933 tx_data.code = WPI_CMD_TX_DATA; 2934 tx_data.ac = ac; 2935 2936 return wpi_cmd2(sc, &tx_data); 2937 } 2938 2939 static int 2940 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2941 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2942 { 2943 struct ieee80211vap *vap = ni->ni_vap; 2944 struct ieee80211_key *k = NULL; 2945 struct ieee80211_frame *wh; 2946 struct wpi_buf tx_data; 2947 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2948 uint32_t flags; 2949 uint8_t ac, type, rate; 2950 int swcrypt, totlen; 2951 2952 wh = mtod(m, struct ieee80211_frame *); 2953 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2954 swcrypt = 1; 2955 2956 ac = params->ibp_pri & 3; 2957 2958 /* Choose a TX rate index. */ 2959 rate = params->ibp_rate0; 2960 2961 flags = 0; 2962 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2963 flags |= WPI_TX_AUTO_SEQ; 2964 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2965 flags |= WPI_TX_NEED_ACK; 2966 if (params->ibp_flags & IEEE80211_BPF_RTS) 2967 flags |= WPI_TX_NEED_RTS; 2968 if (params->ibp_flags & IEEE80211_BPF_CTS) 2969 flags |= WPI_TX_NEED_CTS; 2970 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2971 flags |= WPI_TX_FULL_TXOP; 2972 2973 /* Encrypt the frame if need be. */ 2974 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2975 /* Retrieve key for TX. */ 2976 k = ieee80211_crypto_encap(ni, m); 2977 if (k == NULL) 2978 return (ENOBUFS); 2979 2980 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2981 2982 /* 802.11 header may have moved. 
*/ 2983 wh = mtod(m, struct ieee80211_frame *); 2984 } 2985 totlen = m->m_pkthdr.len; 2986 2987 if (ieee80211_radiotap_active_vap(vap)) { 2988 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2989 2990 tap->wt_flags = 0; 2991 tap->wt_rate = rate; 2992 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2993 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2994 2995 ieee80211_radiotap_tx(vap, m); 2996 } 2997 2998 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2999 if (type == IEEE80211_FC0_TYPE_MGT) { 3000 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3001 3002 /* Tell HW to set timestamp in probe responses. */ 3003 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3004 flags |= WPI_TX_INSERT_TSTAMP; 3005 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3006 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3007 tx->timeout = htole16(3); 3008 else 3009 tx->timeout = htole16(2); 3010 } 3011 3012 if (!swcrypt) { 3013 switch (k->wk_cipher->ic_cipher) { 3014 case IEEE80211_CIPHER_AES_CCM: 3015 tx->security = WPI_CIPHER_CCMP; 3016 break; 3017 3018 default: 3019 break; 3020 } 3021 3022 memcpy(tx->key, k->wk_key, k->wk_keylen); 3023 } 3024 3025 tx->len = htole16(totlen); 3026 tx->flags = htole32(flags); 3027 tx->plcp = rate2plcp(rate); 3028 tx->id = WPI_ID_BROADCAST; 3029 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3030 tx->rts_ntries = params->ibp_try1; 3031 tx->data_ntries = params->ibp_try0; 3032 3033 tx_data.ni = ni; 3034 tx_data.m = m; 3035 tx_data.size = sizeof(struct wpi_cmd_data); 3036 tx_data.code = WPI_CMD_TX_DATA; 3037 tx_data.ac = ac; 3038 3039 return wpi_cmd2(sc, &tx_data); 3040 } 3041 3042 static __inline int 3043 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) 3044 { 3045 struct wpi_tx_ring *ring = &sc->txq[ac]; 3046 int retval; 3047 3048 WPI_TXQ_STATE_LOCK(sc); 3049 retval = WPI_TX_RING_HIMARK - ring->queued; 3050 WPI_TXQ_STATE_UNLOCK(sc); 3051 3052 return retval; 3053 } 3054 3055 static int 3056 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3057 const struct ieee80211_bpf_params *params) 3058 { 3059 struct ieee80211com *ic = ni->ni_ic; 3060 struct wpi_softc *sc = ic->ic_softc; 3061 uint16_t ac; 3062 int error = 0; 3063 3064 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3065 3066 ac = M_WME_GETAC(m); 3067 3068 WPI_TX_LOCK(sc); 3069 3070 /* NB: no fragments here */ 3071 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { 3072 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3073 goto unlock; 3074 } 3075 3076 if (params == NULL) { 3077 /* 3078 * Legacy path; interpret frame contents to decide 3079 * precisely how to send the frame. 3080 */ 3081 error = wpi_tx_data(sc, m, ni); 3082 } else { 3083 /* 3084 * Caller supplied explicit parameters to use in 3085 * sending the frame. 3086 */ 3087 error = wpi_tx_data_raw(sc, m, ni, params); 3088 } 3089 3090 unlock: WPI_TX_UNLOCK(sc); 3091 3092 if (error != 0) { 3093 m_freem(m); 3094 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3095 3096 return error; 3097 } 3098 3099 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3100 3101 return 0; 3102 } 3103 3104 static int 3105 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3106 { 3107 struct wpi_softc *sc = ic->ic_softc; 3108 struct ieee80211_node *ni; 3109 struct mbuf *mnext; 3110 uint16_t ac; 3111 int error, nmbufs; 3112 3113 WPI_TX_LOCK(sc); 3114 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3115 3116 /* Check if interface is up & running. 
*/ 3117 if (__predict_false(sc->sc_running == 0)) { 3118 error = ENXIO; 3119 goto unlock; 3120 } 3121 3122 nmbufs = 1; 3123 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) 3124 nmbufs++; 3125 3126 /* Check for available space. */ 3127 ac = M_WME_GETAC(m); 3128 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { 3129 error = ENOBUFS; 3130 goto unlock; 3131 } 3132 3133 error = 0; 3134 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3135 do { 3136 mnext = m->m_nextpkt; 3137 if (wpi_tx_data(sc, m, ni) != 0) { 3138 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 3139 nmbufs); 3140 wpi_free_txfrags(sc, ac); 3141 ieee80211_free_mbuf(m); 3142 ieee80211_free_node(ni); 3143 break; 3144 } 3145 } while((m = mnext) != NULL); 3146 3147 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3148 3149 unlock: WPI_TX_UNLOCK(sc); 3150 3151 return (error); 3152 } 3153 3154 static void 3155 wpi_watchdog_rfkill(void *arg) 3156 { 3157 struct wpi_softc *sc = arg; 3158 struct ieee80211com *ic = &sc->sc_ic; 3159 3160 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3161 3162 /* No need to lock firmware memory. */ 3163 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3164 /* Radio kill switch is still off. */ 3165 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3166 sc); 3167 } else 3168 ieee80211_runtask(ic, &sc->sc_radioon_task); 3169 } 3170 3171 static void 3172 wpi_scan_timeout(void *arg) 3173 { 3174 struct wpi_softc *sc = arg; 3175 struct ieee80211com *ic = &sc->sc_ic; 3176 3177 ic_printf(ic, "scan timeout\n"); 3178 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3179 } 3180 3181 static void 3182 wpi_tx_timeout(void *arg) 3183 { 3184 struct wpi_softc *sc = arg; 3185 struct ieee80211com *ic = &sc->sc_ic; 3186 3187 ic_printf(ic, "device timeout\n"); 3188 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3189 } 3190 3191 static void 3192 wpi_parent(struct ieee80211com *ic) 3193 { 3194 struct wpi_softc *sc = ic->ic_softc; 3195 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3196 3197 if (ic->ic_nrunning > 0) { 3198 if (wpi_init(sc) == 0) { 3199 ieee80211_notify_radio(ic, 1); 3200 ieee80211_start_all(ic); 3201 } else { 3202 ieee80211_notify_radio(ic, 0); 3203 ieee80211_stop(vap); 3204 } 3205 } else 3206 wpi_stop(sc); 3207 } 3208 3209 /* 3210 * Send a command to the firmware. 3211 */ 3212 static int 3213 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, 3214 int async) 3215 { 3216 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3217 struct wpi_tx_desc *desc; 3218 struct wpi_tx_data *data; 3219 struct wpi_tx_cmd *cmd; 3220 struct mbuf *m; 3221 bus_addr_t paddr; 3222 uint16_t totlen; 3223 int error; 3224 3225 WPI_TXQ_LOCK(sc); 3226 3227 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3228 3229 if (__predict_false(sc->sc_running == 0)) { 3230 /* wpi_stop() was called */ 3231 if (code == WPI_CMD_SCAN) 3232 error = ENETDOWN; 3233 else 3234 error = 0; 3235 3236 goto fail; 3237 } 3238 3239 if (async == 0) 3240 WPI_LOCK_ASSERT(sc); 3241 3242 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", 3243 __func__, wpi_cmd_str(code), size, async); 3244 3245 desc = &ring->desc[ring->cur]; 3246 data = &ring->data[ring->cur]; 3247 totlen = 4 + size; 3248 3249 if (size > sizeof cmd->data) { 3250 /* Command is too large to fit in a descriptor. 
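 * Commands larger than the per-slot buffer (e.g. scan requests, which
 * may be up to WPI_SCAN_MAXSZ bytes) are copied into a jumbo cluster
 * and DMA-mapped separately.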
*/ 3251 if (totlen > MCLBYTES) { 3252 error = EINVAL; 3253 goto fail; 3254 } 3255 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3256 if (m == NULL) { 3257 error = ENOMEM; 3258 goto fail; 3259 } 3260 cmd = mtod(m, struct wpi_tx_cmd *); 3261 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3262 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3263 if (error != 0) { 3264 m_freem(m); 3265 goto fail; 3266 } 3267 data->m = m; 3268 } else { 3269 cmd = &ring->cmd[ring->cur]; 3270 paddr = data->cmd_paddr; 3271 } 3272 3273 cmd->code = code; 3274 cmd->flags = 0; 3275 cmd->qid = ring->qid; 3276 cmd->idx = ring->cur; 3277 memcpy(cmd->data, buf, size); 3278 3279 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3280 desc->segs[0].addr = htole32(paddr); 3281 desc->segs[0].len = htole32(totlen); 3282 3283 if (size > sizeof cmd->data) { 3284 bus_dmamap_sync(ring->data_dmat, data->map, 3285 BUS_DMASYNC_PREWRITE); 3286 } else { 3287 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3288 BUS_DMASYNC_PREWRITE); 3289 } 3290 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3291 BUS_DMASYNC_PREWRITE); 3292 3293 /* Kick command ring. */ 3294 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3295 sc->sc_update_tx_ring(sc, ring); 3296 3297 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3298 3299 WPI_TXQ_UNLOCK(sc); 3300 3301 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3302 3303 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3304 3305 WPI_TXQ_UNLOCK(sc); 3306 3307 return error; 3308 } 3309 3310 /* 3311 * Configure HW multi-rate retries. 3312 */ 3313 static int 3314 wpi_mrr_setup(struct wpi_softc *sc) 3315 { 3316 struct ieee80211com *ic = &sc->sc_ic; 3317 struct wpi_mrr_setup mrr; 3318 uint8_t i; 3319 int error; 3320 3321 /* CCK rates (not used with 802.11a). */ 3322 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3323 mrr.rates[i].flags = 0; 3324 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3325 /* Fallback to the immediate lower CCK rate (if any.) */ 3326 mrr.rates[i].next = 3327 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3328 /* Try twice at this rate before falling back to "next". */ 3329 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3330 } 3331 /* OFDM rates (not used with 802.11b). */ 3332 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3333 mrr.rates[i].flags = 0; 3334 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3335 /* Fallback to the immediate lower rate (if any.) */ 3336 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3337 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3338 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3339 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3340 i - 1; 3341 /* Try twice at this rate before falling back to "next". */ 3342 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3343 } 3344 /* Setup MRR for control frames. */ 3345 mrr.which = htole32(WPI_MRR_CTL); 3346 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3347 if (error != 0) { 3348 device_printf(sc->sc_dev, 3349 "could not setup MRR for control frames\n"); 3350 return error; 3351 } 3352 /* Setup MRR for data frames. 
*/ 3353 mrr.which = htole32(WPI_MRR_DATA); 3354 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3355 if (error != 0) { 3356 device_printf(sc->sc_dev, 3357 "could not setup MRR for data frames\n"); 3358 return error; 3359 } 3360 return 0; 3361 } 3362 3363 static int 3364 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3365 { 3366 struct ieee80211com *ic = ni->ni_ic; 3367 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3368 struct wpi_node *wn = WPI_NODE(ni); 3369 struct wpi_node_info node; 3370 int error; 3371 3372 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3373 3374 if (wn->id == WPI_ID_UNDEFINED) 3375 return EINVAL; 3376 3377 memset(&node, 0, sizeof node); 3378 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3379 node.id = wn->id; 3380 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3381 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3382 node.action = htole32(WPI_ACTION_SET_RATE); 3383 node.antenna = WPI_ANTENNA_BOTH; 3384 3385 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3386 wn->id, ether_sprintf(ni->ni_macaddr)); 3387 3388 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3389 if (error != 0) { 3390 device_printf(sc->sc_dev, 3391 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3392 error); 3393 return error; 3394 } 3395 3396 if (wvp->wv_gtk != 0) { 3397 error = wpi_set_global_keys(ni); 3398 if (error != 0) { 3399 device_printf(sc->sc_dev, 3400 "%s: error while setting global keys\n", __func__); 3401 return ENXIO; 3402 } 3403 } 3404 3405 return 0; 3406 } 3407 3408 /* 3409 * Broadcast node is used to send group-addressed and management frames. 3410 */ 3411 static int 3412 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3413 { 3414 struct ieee80211com *ic = &sc->sc_ic; 3415 struct wpi_node_info node; 3416 3417 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3418 3419 memset(&node, 0, sizeof node); 3420 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3421 node.id = WPI_ID_BROADCAST; 3422 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3423 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3424 node.action = htole32(WPI_ACTION_SET_RATE); 3425 node.antenna = WPI_ANTENNA_BOTH; 3426 3427 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3428 3429 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3430 } 3431 3432 static int 3433 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3434 { 3435 struct wpi_node *wn = WPI_NODE(ni); 3436 int error; 3437 3438 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3439 3440 wn->id = wpi_add_node_entry_sta(sc); 3441 3442 if ((error = wpi_add_node(sc, ni)) != 0) { 3443 wpi_del_node_entry(sc, wn->id); 3444 wn->id = WPI_ID_UNDEFINED; 3445 return error; 3446 } 3447 3448 return 0; 3449 } 3450 3451 static int 3452 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3453 { 3454 struct wpi_node *wn = WPI_NODE(ni); 3455 int error; 3456 3457 KASSERT(wn->id == WPI_ID_UNDEFINED, 3458 ("the node %d was added before", wn->id)); 3459 3460 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3461 3462 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3463 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3464 return ENOMEM; 3465 } 3466 3467 if ((error = wpi_add_node(sc, ni)) != 0) { 3468 wpi_del_node_entry(sc, wn->id); 3469 wn->id = WPI_ID_UNDEFINED; 3470 return error; 3471 } 3472 3473 return 0; 3474 } 3475 3476 static void 3477 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3478 { 3479 struct wpi_node *wn = WPI_NODE(ni); 3480 struct wpi_cmd_del_node node; 3481 int error; 3482 3483 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3484 3485 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3486 3487 memset(&node, 0, sizeof node); 3488 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3489 node.count = 1; 3490 3491 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3492 wn->id, ether_sprintf(ni->ni_macaddr)); 3493 3494 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3495 if (error != 0) { 3496 device_printf(sc->sc_dev, 3497 "%s: could not delete node %u, error %d\n", __func__, 3498 wn->id, error); 3499 } 3500 } 3501 3502 static int 3503 wpi_updateedca(struct ieee80211com *ic) 3504 { 3505 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3506 struct wpi_softc *sc = ic->ic_softc; 3507 struct wpi_edca_params cmd; 3508 int aci, error; 3509 3510 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3511 3512 memset(&cmd, 0, sizeof cmd); 3513 cmd.flags = htole32(WPI_EDCA_UPDATE); 3514 for (aci = 0; aci < WME_NUM_AC; aci++) { 3515 const struct wmeParams *ac = 3516 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3517 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3518 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3519 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3520 cmd.ac[aci].txoplimit = 3521 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3522 3523 DPRINTF(sc, WPI_DEBUG_EDCA, 3524 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3525 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3526 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3527 cmd.ac[aci].txoplimit); 3528 } 3529 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3530 3531 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3532 3533 return error; 3534 #undef WPI_EXP2 3535 } 3536 3537 static void 3538 wpi_set_promisc(struct wpi_softc *sc) 3539 { 3540 struct ieee80211com *ic = &sc->sc_ic; 3541 struct ieee80211vap *vap = 
TAILQ_FIRST(&ic->ic_vaps); 3542 uint32_t promisc_filter; 3543 3544 promisc_filter = WPI_FILTER_CTL; 3545 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3546 promisc_filter |= WPI_FILTER_PROMISC; 3547 3548 if (ic->ic_promisc > 0) 3549 sc->rxon.filter |= htole32(promisc_filter); 3550 else 3551 sc->rxon.filter &= ~htole32(promisc_filter); 3552 } 3553 3554 static void 3555 wpi_update_promisc(struct ieee80211com *ic) 3556 { 3557 struct wpi_softc *sc = ic->ic_softc; 3558 3559 WPI_LOCK(sc); 3560 if (sc->sc_running == 0) { 3561 WPI_UNLOCK(sc); 3562 return; 3563 } 3564 WPI_UNLOCK(sc); 3565 3566 WPI_RXON_LOCK(sc); 3567 wpi_set_promisc(sc); 3568 3569 if (wpi_send_rxon(sc, 1, 1) != 0) { 3570 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3571 __func__); 3572 } 3573 WPI_RXON_UNLOCK(sc); 3574 } 3575 3576 static void 3577 wpi_update_mcast(struct ieee80211com *ic) 3578 { 3579 /* Ignore */ 3580 } 3581 3582 static void 3583 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3584 { 3585 struct wpi_cmd_led led; 3586 3587 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3588 3589 led.which = which; 3590 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3591 led.off = off; 3592 led.on = on; 3593 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3594 } 3595 3596 static int 3597 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3598 { 3599 struct wpi_cmd_timing cmd; 3600 uint64_t val, mod; 3601 3602 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3603 3604 memset(&cmd, 0, sizeof cmd); 3605 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3606 cmd.bintval = htole16(ni->ni_intval); 3607 cmd.lintval = htole16(10); 3608 3609 /* Compute remaining time until next beacon. */ 3610 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3611 mod = le64toh(cmd.tstamp) % val; 3612 cmd.binitval = htole32((uint32_t)(val - mod)); 3613 3614 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3615 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3616 3617 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3618 } 3619 3620 /* 3621 * This function is called periodically (every 60 seconds) to adjust output 3622 * power to temperature changes. 3623 */ 3624 static void 3625 wpi_power_calibration(struct wpi_softc *sc) 3626 { 3627 int temp; 3628 3629 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3630 3631 /* Update sensor data. */ 3632 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3633 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3634 3635 /* Sanity-check read value. */ 3636 if (temp < -260 || temp > 25) { 3637 /* This can't be correct, ignore. */ 3638 DPRINTF(sc, WPI_DEBUG_TEMP, 3639 "out-of-range temperature reported: %d\n", temp); 3640 return; 3641 } 3642 3643 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3644 3645 /* Adjust Tx power if need be. */ 3646 if (abs(temp - sc->temp) <= 6) 3647 return; 3648 3649 sc->temp = temp; 3650 3651 if (wpi_set_txpower(sc, 1) != 0) { 3652 /* just warn, too bad for the automatic calibration... */ 3653 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3654 } 3655 } 3656 3657 /* 3658 * Set TX power for current channel. 3659 */ 3660 static int 3661 wpi_set_txpower(struct wpi_softc *sc, int async) 3662 { 3663 struct wpi_power_group *group; 3664 struct wpi_cmd_txpower cmd; 3665 uint8_t chan; 3666 int idx, is_chan_5ghz, i; 3667 3668 /* Retrieve current channel from last RXON. 
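 * groups[0] covers the 2GHz band; for a 5GHz channel the first group
 * with group->chan >= chan is selected below.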
*/ 3669 chan = sc->rxon.chan; 3670 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3671 3672 /* Find the TX power group to which this channel belongs. */ 3673 if (is_chan_5ghz) { 3674 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3675 if (chan <= group->chan) 3676 break; 3677 } else 3678 group = &sc->groups[0]; 3679 3680 memset(&cmd, 0, sizeof cmd); 3681 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3682 cmd.chan = htole16(chan); 3683 3684 /* Set TX power for all OFDM and CCK rates. */ 3685 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3686 /* Retrieve TX power for this channel/rate. */ 3687 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3688 3689 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3690 3691 if (is_chan_5ghz) { 3692 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3693 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3694 } else { 3695 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3696 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3697 } 3698 DPRINTF(sc, WPI_DEBUG_TEMP, 3699 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3700 } 3701 3702 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3703 } 3704 3705 /* 3706 * Determine Tx power index for a given channel/rate combination. 3707 * This takes into account the regulatory information from EEPROM and the 3708 * current temperature. 3709 */ 3710 static int 3711 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3712 uint8_t chan, int is_chan_5ghz, int ridx) 3713 { 3714 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3715 #define fdivround(a, b, n) \ 3716 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3717 3718 /* Linear interpolation. */ 3719 #define interpolate(x, x1, y1, x2, y2, n) \ 3720 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3721 3722 struct wpi_power_sample *sample; 3723 int pwr, idx; 3724 3725 /* Default TX power is group maximum TX power minus 3dB. */ 3726 pwr = group->maxpwr / 2; 3727 3728 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3729 switch (ridx) { 3730 case WPI_RIDX_OFDM36: 3731 pwr -= is_chan_5ghz ? 5 : 0; 3732 break; 3733 case WPI_RIDX_OFDM48: 3734 pwr -= is_chan_5ghz ? 10 : 7; 3735 break; 3736 case WPI_RIDX_OFDM54: 3737 pwr -= is_chan_5ghz ? 12 : 9; 3738 break; 3739 } 3740 3741 /* Never exceed the channel maximum allowed TX power. */ 3742 pwr = min(pwr, sc->maxpwr[chan]); 3743 3744 /* Retrieve TX power index into gain tables from samples. */ 3745 for (sample = group->samples; sample < &group->samples[3]; sample++) 3746 if (pwr > sample[1].power) 3747 break; 3748 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3749 idx = interpolate(pwr, sample[0].power, sample[0].index, 3750 sample[1].power, sample[1].index, 19); 3751 3752 /*- 3753 * Adjust power index based on current temperature: 3754 * - if cooler than factory-calibrated: decrease output power 3755 * - if warmer than factory-calibrated: increase output power 3756 */ 3757 idx -= (sc->temp - group->temp) * 11 / 100; 3758 3759 /* Decrease TX power for CCK rates (-5dB). */ 3760 if (ridx >= WPI_RIDX_CCK1) 3761 idx += 10; 3762 3763 /* Make sure idx stays in a valid range. */ 3764 if (idx < 0) 3765 return 0; 3766 if (idx > WPI_MAX_PWR_INDEX) 3767 return WPI_MAX_PWR_INDEX; 3768 return idx; 3769 3770 #undef interpolate 3771 #undef fdivround 3772 } 3773 3774 /* 3775 * Set STA mode power saving level (between 0 and 5). 
3776 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3777 */ 3778 static int 3779 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3780 { 3781 struct wpi_pmgt_cmd cmd; 3782 const struct wpi_pmgt *pmgt; 3783 uint32_t max, reg; 3784 uint8_t skip_dtim; 3785 int i; 3786 3787 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3788 "%s: dtim=%d, level=%d, async=%d\n", 3789 __func__, dtim, level, async); 3790 3791 /* Select which PS parameters to use. */ 3792 if (dtim <= 10) 3793 pmgt = &wpi_pmgt[0][level]; 3794 else 3795 pmgt = &wpi_pmgt[1][level]; 3796 3797 memset(&cmd, 0, sizeof cmd); 3798 if (level != 0) /* not CAM */ 3799 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3800 /* Retrieve PCIe Active State Power Management (ASPM). */ 3801 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 3802 if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */ 3803 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3804 3805 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3806 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3807 3808 if (dtim == 0) { 3809 dtim = 1; 3810 skip_dtim = 0; 3811 } else 3812 skip_dtim = pmgt->skip_dtim; 3813 3814 if (skip_dtim != 0) { 3815 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3816 max = pmgt->intval[4]; 3817 if (max == (uint32_t)-1) 3818 max = dtim * (skip_dtim + 1); 3819 else if (max > dtim) 3820 max = (max / dtim) * dtim; 3821 } else 3822 max = dtim; 3823 3824 for (i = 0; i < 5; i++) 3825 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3826 3827 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3828 } 3829 3830 static int 3831 wpi_send_btcoex(struct wpi_softc *sc) 3832 { 3833 struct wpi_bluetooth cmd; 3834 3835 memset(&cmd, 0, sizeof cmd); 3836 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3837 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3838 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3839 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3840 __func__); 3841 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3842 } 3843 3844 static int 3845 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3846 { 3847 int error; 3848 3849 if (async) 3850 WPI_RXON_LOCK_ASSERT(sc); 3851 3852 if (assoc && wpi_check_bss_filter(sc) != 0) { 3853 struct wpi_assoc rxon_assoc; 3854 3855 rxon_assoc.flags = sc->rxon.flags; 3856 rxon_assoc.filter = sc->rxon.filter; 3857 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3858 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3859 rxon_assoc.reserved = 0; 3860 3861 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3862 sizeof (struct wpi_assoc), async); 3863 if (error != 0) { 3864 device_printf(sc->sc_dev, 3865 "RXON_ASSOC command failed, error %d\n", error); 3866 return error; 3867 } 3868 } else { 3869 if (async) { 3870 WPI_NT_LOCK(sc); 3871 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3872 sizeof (struct wpi_rxon), async); 3873 if (error == 0) 3874 wpi_clear_node_table(sc); 3875 WPI_NT_UNLOCK(sc); 3876 } else { 3877 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3878 sizeof (struct wpi_rxon), async); 3879 if (error == 0) 3880 wpi_clear_node_table(sc); 3881 } 3882 3883 if (error != 0) { 3884 device_printf(sc->sc_dev, 3885 "RXON command failed, error %d\n", error); 3886 return error; 3887 } 3888 3889 /* Add broadcast node. 
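 * A full RXON clears the firmware node table (see above), so the
 * broadcast entry used for group-addressed and management frames must be
 * installed again.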
*/ 3890 error = wpi_add_broadcast_node(sc, async); 3891 if (error != 0) { 3892 device_printf(sc->sc_dev, 3893 "could not add broadcast node, error %d\n", error); 3894 return error; 3895 } 3896 } 3897 3898 /* Configuration has changed, set Tx power accordingly. */ 3899 if ((error = wpi_set_txpower(sc, async)) != 0) { 3900 device_printf(sc->sc_dev, 3901 "%s: could not set TX power, error %d\n", __func__, error); 3902 return error; 3903 } 3904 3905 return 0; 3906 } 3907 3908 /** 3909 * Configure the card to listen to a particular channel; this transitions the 3910 * card into being able to receive frames from remote devices. 3911 */ 3912 static int 3913 wpi_config(struct wpi_softc *sc) 3914 { 3915 struct ieee80211com *ic = &sc->sc_ic; 3916 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3917 struct ieee80211_channel *c = ic->ic_curchan; 3918 int error; 3919 3920 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3921 3922 /* Set power saving level to CAM during initialization. */ 3923 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3924 device_printf(sc->sc_dev, 3925 "%s: could not set power saving level\n", __func__); 3926 return error; 3927 } 3928 3929 /* Configure bluetooth coexistence. */ 3930 if ((error = wpi_send_btcoex(sc)) != 0) { 3931 device_printf(sc->sc_dev, 3932 "could not configure bluetooth coexistence\n"); 3933 return error; 3934 } 3935 3936 /* Configure adapter. */ 3937 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3938 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3939 3940 /* Set default channel. */ 3941 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 3942 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3943 if (IEEE80211_IS_CHAN_2GHZ(c)) 3944 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3945 3946 sc->rxon.filter = WPI_FILTER_MULTICAST; 3947 switch (ic->ic_opmode) { 3948 case IEEE80211_M_STA: 3949 sc->rxon.mode = WPI_MODE_STA; 3950 break; 3951 case IEEE80211_M_IBSS: 3952 sc->rxon.mode = WPI_MODE_IBSS; 3953 sc->rxon.filter |= WPI_FILTER_BEACON; 3954 break; 3955 case IEEE80211_M_HOSTAP: 3956 /* XXX workaround for beaconing */ 3957 sc->rxon.mode = WPI_MODE_IBSS; 3958 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3959 break; 3960 case IEEE80211_M_AHDEMO: 3961 sc->rxon.mode = WPI_MODE_HOSTAP; 3962 break; 3963 case IEEE80211_M_MONITOR: 3964 sc->rxon.mode = WPI_MODE_MONITOR; 3965 break; 3966 default: 3967 device_printf(sc->sc_dev, "unknown opmode %d\n", 3968 ic->ic_opmode); 3969 return EINVAL; 3970 } 3971 sc->rxon.filter = htole32(sc->rxon.filter); 3972 wpi_set_promisc(sc); 3973 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3974 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3975 3976 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3977 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3978 __func__); 3979 return error; 3980 } 3981 3982 /* Set up rate scaling. */ 3983 if ((error = wpi_mrr_setup(sc)) != 0) { 3984 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3985 error); 3986 return error; 3987 } 3988 3989 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3990 3991 return 0; 3992 } 3993 3994 static uint16_t 3995 wpi_get_active_dwell_time(struct wpi_softc *sc, 3996 struct ieee80211_channel *c, uint8_t n_probes) 3997 { 3998 /* No channel? Default to 2GHz settings. */ 3999 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 4000 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 4001 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 4002 } 4003 4004 /* 5GHz dwell time.
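* As in the 2GHz case, the dwell time grows with the number of probe requests (n_probes) to be sent on the channel.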
*/ 4005 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4006 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4007 } 4008 4009 /* 4010 * Limit the total dwell time. 4011 * 4012 * Returns the dwell time in milliseconds. 4013 */ 4014 static uint16_t 4015 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4016 { 4017 struct ieee80211com *ic = &sc->sc_ic; 4018 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4019 uint16_t bintval = 0; 4020 4021 /* bintval is in TU (1.024mS) */ 4022 if (vap != NULL) 4023 bintval = vap->iv_bss->ni_intval; 4024 4025 /* 4026 * If it's non-zero, we should calculate the minimum of 4027 * it and the DWELL_BASE. 4028 * 4029 * XXX Yes, the math should take into account that bintval 4030 * is 1.024mS, not 1mS.. 4031 */ 4032 if (bintval > 0) { 4033 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4034 bintval); 4035 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4036 } 4037 4038 /* No association context? Default. */ 4039 return dwell_time; 4040 } 4041 4042 static uint16_t 4043 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4044 { 4045 uint16_t passive; 4046 4047 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4048 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4049 else 4050 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4051 4052 /* Clamp to the beacon interval if we're associated. */ 4053 return (wpi_limit_dwell(sc, passive)); 4054 } 4055 4056 static uint32_t 4057 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4058 { 4059 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4060 uint32_t nbeacons = time / bintval; 4061 4062 if (mod > WPI_PAUSE_MAX_TIME) 4063 mod = WPI_PAUSE_MAX_TIME; 4064 4065 return WPI_PAUSE_SCAN(nbeacons, mod); 4066 } 4067 4068 /* 4069 * Send a scan request to the firmware. 4070 */ 4071 static int 4072 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4073 { 4074 struct ieee80211com *ic = &sc->sc_ic; 4075 struct ieee80211_scan_state *ss = ic->ic_scan; 4076 struct ieee80211vap *vap = ss->ss_vap; 4077 struct wpi_scan_hdr *hdr; 4078 struct wpi_cmd_data *tx; 4079 struct wpi_scan_essid *essids; 4080 struct wpi_scan_chan *chan; 4081 struct ieee80211_frame *wh; 4082 struct ieee80211_rateset *rs; 4083 uint16_t bintval, buflen, dwell_active, dwell_passive; 4084 uint8_t *buf, *frm, i, nssid; 4085 int bgscan, error; 4086 4087 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4088 4089 /* 4090 * We are absolutely not allowed to send a scan command when another 4091 * scan command is pending. 4092 */ 4093 if (callout_pending(&sc->scan_timeout)) { 4094 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4095 __func__); 4096 error = EAGAIN; 4097 goto fail; 4098 } 4099 4100 bgscan = wpi_check_bss_filter(sc); 4101 bintval = vap->iv_bss->ni_intval; 4102 if (bgscan != 0 && 4103 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4104 error = EOPNOTSUPP; 4105 goto fail; 4106 } 4107 4108 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4109 if (buf == NULL) { 4110 device_printf(sc->sc_dev, 4111 "%s: could not allocate buffer for scan command\n", 4112 __func__); 4113 error = ENOMEM; 4114 goto fail; 4115 } 4116 hdr = (struct wpi_scan_hdr *)buf; 4117 4118 /* 4119 * Move to the next channel if no packets are received within 10 msecs 4120 * after sending the probe request. 
4121 */ 4122 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4123 hdr->quiet_threshold = htole16(1); 4124 4125 if (bgscan != 0) { 4126 /* 4127 * Max needs to be greater than active and passive and quiet! 4128 * It's also in microseconds! 4129 */ 4130 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4131 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4132 bintval)); 4133 } 4134 4135 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4136 4137 tx = (struct wpi_cmd_data *)(hdr + 1); 4138 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4139 tx->id = WPI_ID_BROADCAST; 4140 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4141 4142 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4143 /* Send probe requests at 6Mbps. */ 4144 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4145 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4146 } else { 4147 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4148 /* Send probe requests at 1Mbps. */ 4149 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4150 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4151 } 4152 4153 essids = (struct wpi_scan_essid *)(tx + 1); 4154 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4155 for (i = 0; i < nssid; i++) { 4156 essids[i].id = IEEE80211_ELEMID_SSID; 4157 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4158 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4159 #ifdef WPI_DEBUG 4160 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4161 printf("Scanning Essid: "); 4162 ieee80211_print_essid(essids[i].data, essids[i].len); 4163 printf("\n"); 4164 } 4165 #endif 4166 } 4167 4168 /* 4169 * Build a probe request frame. Most of the following code is a 4170 * copy & paste of what is done in net80211. 4171 */ 4172 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4173 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4174 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4175 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4176 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4177 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4178 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4179 4180 frm = (uint8_t *)(wh + 1); 4181 frm = ieee80211_add_ssid(frm, NULL, 0); 4182 frm = ieee80211_add_rates(frm, rs); 4183 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4184 frm = ieee80211_add_xrates(frm, rs); 4185 4186 /* Set length of probe request. */ 4187 tx->len = htole16(frm - (uint8_t *)wh); 4188 4189 /* 4190 * Construct information about the channel that we 4191 * want to scan. The firmware expects this to be directly 4192 * after the scan probe request 4193 */ 4194 chan = (struct wpi_scan_chan *)frm; 4195 chan->chan = ieee80211_chan2ieee(ic, c); 4196 chan->flags = 0; 4197 if (nssid) { 4198 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4199 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4200 } else 4201 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4202 4203 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4204 chan->flags |= WPI_CHAN_ACTIVE; 4205 4206 /* 4207 * Calculate the active/passive dwell times. 4208 */ 4209 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4210 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4211 4212 /* Make sure they're valid. 
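* The active dwell must never exceed the passive dwell, so clamp it here rather than failing the scan.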
*/ 4213 if (dwell_active > dwell_passive) 4214 dwell_active = dwell_passive; 4215 4216 chan->active = htole16(dwell_active); 4217 chan->passive = htole16(dwell_passive); 4218 4219 chan->dsp_gain = 0x6e; /* Default level */ 4220 4221 if (IEEE80211_IS_CHAN_5GHZ(c)) 4222 chan->rf_gain = 0x3b; 4223 else 4224 chan->rf_gain = 0x28; 4225 4226 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4227 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4228 4229 hdr->nchan++; 4230 4231 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4232 /* XXX Force probe request transmission. */ 4233 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4234 4235 chan++; 4236 4237 /* Reduce unnecessary delay. */ 4238 chan->flags = 0; 4239 chan->passive = chan->active = hdr->quiet_time; 4240 4241 hdr->nchan++; 4242 } 4243 4244 chan++; 4245 4246 buflen = (uint8_t *)chan - buf; 4247 hdr->len = htole16(buflen); 4248 4249 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4250 hdr->nchan); 4251 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4252 free(buf, M_DEVBUF); 4253 4254 if (error != 0) 4255 goto fail; 4256 4257 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4258 4259 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4260 4261 return 0; 4262 4263 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4264 4265 return error; 4266 } 4267 4268 static int 4269 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4270 { 4271 struct ieee80211com *ic = vap->iv_ic; 4272 struct ieee80211_node *ni = vap->iv_bss; 4273 struct ieee80211_channel *c = ni->ni_chan; 4274 int error; 4275 4276 WPI_RXON_LOCK(sc); 4277 4278 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4279 4280 /* Update adapter configuration. */ 4281 sc->rxon.associd = 0; 4282 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4283 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4284 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4285 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4286 if (IEEE80211_IS_CHAN_2GHZ(c)) 4287 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4288 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4289 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4290 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4291 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4292 if (IEEE80211_IS_CHAN_A(c)) { 4293 sc->rxon.cck_mask = 0; 4294 sc->rxon.ofdm_mask = 0x15; 4295 } else if (IEEE80211_IS_CHAN_B(c)) { 4296 sc->rxon.cck_mask = 0x03; 4297 sc->rxon.ofdm_mask = 0; 4298 } else { 4299 /* Assume 802.11b/g. 
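* cck_mask 0x0f enables the 1/2/5.5/11 Mbps CCK rates; ofdm_mask 0x15 enables the mandatory 6/12/24 Mbps OFDM rates.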
*/ 4300 sc->rxon.cck_mask = 0x0f; 4301 sc->rxon.ofdm_mask = 0x15; 4302 } 4303 4304 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4305 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4306 sc->rxon.ofdm_mask); 4307 4308 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4309 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4310 __func__); 4311 } 4312 4313 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4314 4315 WPI_RXON_UNLOCK(sc); 4316 4317 return error; 4318 } 4319 4320 static int 4321 wpi_config_beacon(struct wpi_vap *wvp) 4322 { 4323 struct ieee80211vap *vap = &wvp->wv_vap; 4324 struct ieee80211com *ic = vap->iv_ic; 4325 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4326 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4327 struct wpi_softc *sc = ic->ic_softc; 4328 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4329 struct ieee80211_tim_ie *tie; 4330 struct mbuf *m; 4331 uint8_t *ptr; 4332 int error; 4333 4334 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4335 4336 WPI_VAP_LOCK_ASSERT(wvp); 4337 4338 cmd->len = htole16(bcn->m->m_pkthdr.len); 4339 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4340 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4341 4342 /* XXX seems to be unused */ 4343 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4344 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4345 ptr = mtod(bcn->m, uint8_t *); 4346 4347 cmd->tim = htole16(bo->bo_tim - ptr); 4348 cmd->timsz = tie->tim_len; 4349 } 4350 4351 /* Necessary for recursion in ieee80211_beacon_update(). */ 4352 m = bcn->m; 4353 bcn->m = m_dup(m, M_NOWAIT); 4354 if (bcn->m == NULL) { 4355 device_printf(sc->sc_dev, 4356 "%s: could not copy beacon frame\n", __func__); 4357 error = ENOMEM; 4358 goto end; 4359 } 4360 4361 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4362 device_printf(sc->sc_dev, 4363 "%s: could not update beacon frame, error %d", __func__, 4364 error); 4365 m_freem(bcn->m); 4366 } 4367 4368 /* Restore mbuf. 
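* The duplicate handed to wpi_cmd2() is owned by the command path from here on (it is freed above if the command fails); keep the original so ieee80211_beacon_update() still has a frame to work on next time.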
*/ 4369 end: bcn->m = m; 4370 4371 return error; 4372 } 4373 4374 static int 4375 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4376 { 4377 struct ieee80211vap *vap = ni->ni_vap; 4378 struct wpi_vap *wvp = WPI_VAP(vap); 4379 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4380 struct mbuf *m; 4381 int error; 4382 4383 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4384 4385 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4386 return EINVAL; 4387 4388 m = ieee80211_beacon_alloc(ni); 4389 if (m == NULL) { 4390 device_printf(sc->sc_dev, 4391 "%s: could not allocate beacon frame\n", __func__); 4392 return ENOMEM; 4393 } 4394 4395 WPI_VAP_LOCK(wvp); 4396 if (bcn->m != NULL) 4397 m_freem(bcn->m); 4398 4399 bcn->m = m; 4400 4401 error = wpi_config_beacon(wvp); 4402 WPI_VAP_UNLOCK(wvp); 4403 4404 return error; 4405 } 4406 4407 static void 4408 wpi_update_beacon(struct ieee80211vap *vap, int item) 4409 { 4410 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4411 struct wpi_vap *wvp = WPI_VAP(vap); 4412 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4413 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4414 struct ieee80211_node *ni = vap->iv_bss; 4415 int mcast = 0; 4416 4417 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4418 4419 WPI_VAP_LOCK(wvp); 4420 if (bcn->m == NULL) { 4421 bcn->m = ieee80211_beacon_alloc(ni); 4422 if (bcn->m == NULL) { 4423 device_printf(sc->sc_dev, 4424 "%s: could not allocate beacon frame\n", __func__); 4425 4426 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4427 __func__); 4428 4429 WPI_VAP_UNLOCK(wvp); 4430 return; 4431 } 4432 } 4433 WPI_VAP_UNLOCK(wvp); 4434 4435 if (item == IEEE80211_BEACON_TIM) 4436 mcast = 1; /* TODO */ 4437 4438 setbit(bo->bo_flags, item); 4439 ieee80211_beacon_update(ni, bcn->m, mcast); 4440 4441 WPI_VAP_LOCK(wvp); 4442 wpi_config_beacon(wvp); 4443 WPI_VAP_UNLOCK(wvp); 4444 4445 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4446 } 4447 4448 static void 4449 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4450 { 4451 struct ieee80211vap *vap = ni->ni_vap; 4452 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4453 struct wpi_node *wn = WPI_NODE(ni); 4454 int error; 4455 4456 WPI_NT_LOCK(sc); 4457 4458 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4459 4460 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4461 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4462 device_printf(sc->sc_dev, 4463 "%s: could not add IBSS node, error %d\n", 4464 __func__, error); 4465 } 4466 } 4467 WPI_NT_UNLOCK(sc); 4468 } 4469 4470 static int 4471 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4472 { 4473 struct ieee80211com *ic = vap->iv_ic; 4474 struct ieee80211_node *ni = vap->iv_bss; 4475 struct ieee80211_channel *c = ni->ni_chan; 4476 int error; 4477 4478 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4479 4480 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4481 /* Link LED blinks while monitoring. */ 4482 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4483 return 0; 4484 } 4485 4486 /* XXX kernel panic workaround */ 4487 if (c == IEEE80211_CHAN_ANYC) { 4488 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4489 __func__); 4490 return EINVAL; 4491 } 4492 4493 if ((error = wpi_set_timing(sc, ni)) != 0) { 4494 device_printf(sc->sc_dev, 4495 "%s: could not set timing, error %d\n", __func__, error); 4496 return error; 4497 } 4498 4499 /* Update adapter configuration. 
*/ 4500 WPI_RXON_LOCK(sc); 4501 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4502 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4503 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4504 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4505 if (IEEE80211_IS_CHAN_2GHZ(c)) 4506 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4507 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4508 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4509 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4510 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4511 if (IEEE80211_IS_CHAN_A(c)) { 4512 sc->rxon.cck_mask = 0; 4513 sc->rxon.ofdm_mask = 0x15; 4514 } else if (IEEE80211_IS_CHAN_B(c)) { 4515 sc->rxon.cck_mask = 0x03; 4516 sc->rxon.ofdm_mask = 0; 4517 } else { 4518 /* Assume 802.11b/g. */ 4519 sc->rxon.cck_mask = 0x0f; 4520 sc->rxon.ofdm_mask = 0x15; 4521 } 4522 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4523 4524 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4525 sc->rxon.chan, sc->rxon.flags); 4526 4527 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4528 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4529 __func__); 4530 return error; 4531 } 4532 4533 /* Start periodic calibration timer. */ 4534 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4535 4536 WPI_RXON_UNLOCK(sc); 4537 4538 if (vap->iv_opmode == IEEE80211_M_IBSS || 4539 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4540 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4541 device_printf(sc->sc_dev, 4542 "%s: could not setup beacon, error %d\n", __func__, 4543 error); 4544 return error; 4545 } 4546 } 4547 4548 if (vap->iv_opmode == IEEE80211_M_STA) { 4549 /* Add BSS node. */ 4550 WPI_NT_LOCK(sc); 4551 error = wpi_add_sta_node(sc, ni); 4552 WPI_NT_UNLOCK(sc); 4553 if (error != 0) { 4554 device_printf(sc->sc_dev, 4555 "%s: could not add BSS node, error %d\n", __func__, 4556 error); 4557 return error; 4558 } 4559 } 4560 4561 /* Link LED always on while associated. */ 4562 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4563 4564 /* Enable power-saving mode if requested by user. 
*/ 4565 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4566 vap->iv_opmode != IEEE80211_M_IBSS) 4567 (void)wpi_set_pslevel(sc, 0, 3, 1); 4568 4569 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4570 4571 return 0; 4572 } 4573 4574 static int 4575 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4576 { 4577 const struct ieee80211_cipher *cip = k->wk_cipher; 4578 struct ieee80211vap *vap = ni->ni_vap; 4579 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4580 struct wpi_node *wn = WPI_NODE(ni); 4581 struct wpi_node_info node; 4582 uint16_t kflags; 4583 int error; 4584 4585 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4586 4587 if (wpi_check_node_entry(sc, wn->id) == 0) { 4588 device_printf(sc->sc_dev, "%s: node does not exist\n", 4589 __func__); 4590 return 0; 4591 } 4592 4593 switch (cip->ic_cipher) { 4594 case IEEE80211_CIPHER_AES_CCM: 4595 kflags = WPI_KFLAG_CCMP; 4596 break; 4597 4598 default: 4599 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4600 cip->ic_cipher); 4601 return 0; 4602 } 4603 4604 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4605 if (k->wk_flags & IEEE80211_KEY_GROUP) 4606 kflags |= WPI_KFLAG_MULTICAST; 4607 4608 memset(&node, 0, sizeof node); 4609 node.id = wn->id; 4610 node.control = WPI_NODE_UPDATE; 4611 node.flags = WPI_FLAG_KEY_SET; 4612 node.kflags = htole16(kflags); 4613 memcpy(node.key, k->wk_key, k->wk_keylen); 4614 again: 4615 DPRINTF(sc, WPI_DEBUG_KEY, 4616 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4617 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4618 node.id, ether_sprintf(ni->ni_macaddr)); 4619 4620 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4621 if (error != 0) { 4622 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4623 error); 4624 return !error; 4625 } 4626 4627 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4628 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4629 kflags |= WPI_KFLAG_MULTICAST; 4630 node.kflags = htole16(kflags); 4631 4632 goto again; 4633 } 4634 4635 return 1; 4636 } 4637 4638 static void 4639 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4640 { 4641 const struct ieee80211_key *k = arg; 4642 struct ieee80211vap *vap = ni->ni_vap; 4643 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4644 struct wpi_node *wn = WPI_NODE(ni); 4645 int error; 4646 4647 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4648 return; 4649 4650 WPI_NT_LOCK(sc); 4651 error = wpi_load_key(ni, k); 4652 WPI_NT_UNLOCK(sc); 4653 4654 if (error == 0) { 4655 device_printf(sc->sc_dev, "%s: error while setting key\n", 4656 __func__); 4657 } 4658 } 4659 4660 static int 4661 wpi_set_global_keys(struct ieee80211_node *ni) 4662 { 4663 struct ieee80211vap *vap = ni->ni_vap; 4664 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4665 int error = 1; 4666 4667 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4668 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4669 error = wpi_load_key(ni, wk); 4670 4671 return !error; 4672 } 4673 4674 static int 4675 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4676 { 4677 struct ieee80211vap *vap = ni->ni_vap; 4678 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4679 struct wpi_node *wn = WPI_NODE(ni); 4680 struct wpi_node_info node; 4681 uint16_t kflags; 4682 int error; 4683 4684 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4685 4686 if (wpi_check_node_entry(sc, wn->id) == 0) { 4687 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4688 return 1; /* 
Nothing to do. */ 4689 } 4690 4691 kflags = WPI_KFLAG_KID(k->wk_keyix); 4692 if (k->wk_flags & IEEE80211_KEY_GROUP) 4693 kflags |= WPI_KFLAG_MULTICAST; 4694 4695 memset(&node, 0, sizeof node); 4696 node.id = wn->id; 4697 node.control = WPI_NODE_UPDATE; 4698 node.flags = WPI_FLAG_KEY_SET; 4699 node.kflags = htole16(kflags); 4700 again: 4701 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4702 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4703 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4704 4705 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4706 if (error != 0) { 4707 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4708 error); 4709 return !error; 4710 } 4711 4712 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4713 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4714 kflags |= WPI_KFLAG_MULTICAST; 4715 node.kflags = htole16(kflags); 4716 4717 goto again; 4718 } 4719 4720 return 1; 4721 } 4722 4723 static void 4724 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4725 { 4726 const struct ieee80211_key *k = arg; 4727 struct ieee80211vap *vap = ni->ni_vap; 4728 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4729 struct wpi_node *wn = WPI_NODE(ni); 4730 int error; 4731 4732 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4733 return; 4734 4735 WPI_NT_LOCK(sc); 4736 error = wpi_del_key(ni, k); 4737 WPI_NT_UNLOCK(sc); 4738 4739 if (error == 0) { 4740 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4741 __func__); 4742 } 4743 } 4744 4745 static int 4746 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4747 int set) 4748 { 4749 struct ieee80211com *ic = vap->iv_ic; 4750 struct wpi_softc *sc = ic->ic_softc; 4751 struct wpi_vap *wvp = WPI_VAP(vap); 4752 struct ieee80211_node *ni; 4753 int error, ni_ref = 0; 4754 4755 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4756 4757 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4758 /* Not for us. */ 4759 return 1; 4760 } 4761 4762 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4763 /* XMIT keys are handled in wpi_tx_data(). */ 4764 return 1; 4765 } 4766 4767 /* Handle group keys. */ 4768 if (&vap->iv_nw_keys[0] <= k && 4769 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4770 WPI_NT_LOCK(sc); 4771 if (set) 4772 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4773 else 4774 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4775 WPI_NT_UNLOCK(sc); 4776 4777 if (vap->iv_state == IEEE80211_S_RUN) { 4778 ieee80211_iterate_nodes(&ic->ic_sta, 4779 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4780 __DECONST(void *, k)); 4781 } 4782 4783 return 1; 4784 } 4785 4786 switch (vap->iv_opmode) { 4787 case IEEE80211_M_STA: 4788 ni = vap->iv_bss; 4789 break; 4790 4791 case IEEE80211_M_IBSS: 4792 case IEEE80211_M_AHDEMO: 4793 case IEEE80211_M_HOSTAP: 4794 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4795 if (ni == NULL) 4796 return 0; /* should not happen */ 4797 4798 ni_ref = 1; 4799 break; 4800 4801 default: 4802 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4803 vap->iv_opmode); 4804 return 0; 4805 } 4806 4807 WPI_NT_LOCK(sc); 4808 if (set) 4809 error = wpi_load_key(ni, k); 4810 else 4811 error = wpi_del_key(ni, k); 4812 WPI_NT_UNLOCK(sc); 4813 4814 if (ni_ref) 4815 ieee80211_node_decref(ni); 4816 4817 return error; 4818 } 4819 4820 static int 4821 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4822 { 4823 return wpi_process_key(vap, k, 1); 4824 } 4825 4826 static int 4827 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4828 { 4829 return wpi_process_key(vap, k, 0); 4830 } 4831 4832 /* 4833 * This function is called after the runtime firmware notifies us of its 4834 * readiness (called in a process context). 4835 */ 4836 static int 4837 wpi_post_alive(struct wpi_softc *sc) 4838 { 4839 int ntries, error; 4840 4841 /* Check (again) that the radio is not disabled. */ 4842 if ((error = wpi_nic_lock(sc)) != 0) 4843 return error; 4844 4845 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4846 4847 /* NB: Runtime firmware must be up and running. */ 4848 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4849 device_printf(sc->sc_dev, 4850 "RF switch: radio disabled (%s)\n", __func__); 4851 wpi_nic_unlock(sc); 4852 return EPERM; /* :-) */ 4853 } 4854 wpi_nic_unlock(sc); 4855 4856 /* Wait for thermal sensor to calibrate. */ 4857 for (ntries = 0; ntries < 1000; ntries++) { 4858 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4859 break; 4860 DELAY(10); 4861 } 4862 4863 if (ntries == 1000) { 4864 device_printf(sc->sc_dev, 4865 "timeout waiting for thermal sensor calibration\n"); 4866 return ETIMEDOUT; 4867 } 4868 4869 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4870 return 0; 4871 } 4872 4873 /* 4874 * The firmware boot code is small and is intended to be copied directly into 4875 * the NIC internal memory (no DMA transfer). 4876 */ 4877 static int 4878 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) 4879 { 4880 int error, ntries; 4881 4882 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4883 4884 size /= sizeof (uint32_t); 4885 4886 if ((error = wpi_nic_lock(sc)) != 0) 4887 return error; 4888 4889 /* Copy microcode image into NIC memory. */ 4890 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4891 (const uint32_t *)ucode, size); 4892 4893 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4894 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4895 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4896 4897 /* Start boot load now. */ 4898 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4899 4900 /* Wait for transfer to complete. */ 4901 for (ntries = 0; ntries < 1000; ntries++) { 4902 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4903 DPRINTF(sc, WPI_DEBUG_HW, 4904 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4905 WPI_FH_TX_STATUS_IDLE(6), 4906 status & WPI_FH_TX_STATUS_IDLE(6)); 4907 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4908 DPRINTF(sc, WPI_DEBUG_HW, 4909 "Status Match! 
- ntries = %d\n", ntries); 4910 break; 4911 } 4912 DELAY(10); 4913 } 4914 if (ntries == 1000) { 4915 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4916 __func__); 4917 wpi_nic_unlock(sc); 4918 return ETIMEDOUT; 4919 } 4920 4921 /* Enable boot after power up. */ 4922 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4923 4924 wpi_nic_unlock(sc); 4925 return 0; 4926 } 4927 4928 static int 4929 wpi_load_firmware(struct wpi_softc *sc) 4930 { 4931 struct wpi_fw_info *fw = &sc->fw; 4932 struct wpi_dma_info *dma = &sc->fw_dma; 4933 int error; 4934 4935 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4936 4937 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4938 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4939 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4940 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4941 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4942 4943 /* Tell adapter where to find initialization sections. */ 4944 if ((error = wpi_nic_lock(sc)) != 0) 4945 return error; 4946 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4947 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4948 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4949 dma->paddr + WPI_FW_DATA_MAXSZ); 4950 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4951 wpi_nic_unlock(sc); 4952 4953 /* Load firmware boot code. */ 4954 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4955 if (error != 0) { 4956 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4957 __func__); 4958 return error; 4959 } 4960 4961 /* Now press "execute". */ 4962 WPI_WRITE(sc, WPI_RESET, 0); 4963 4964 /* Wait at most one second for first alive notification. */ 4965 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4966 device_printf(sc->sc_dev, 4967 "%s: timeout waiting for adapter to initialize, error %d\n", 4968 __func__, error); 4969 return error; 4970 } 4971 4972 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4973 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4974 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4975 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4976 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4977 4978 /* Tell adapter where to find runtime sections. 
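* Same BSM registers as for the init image; the WPI_FW_UPDATED bit in the text size marks the runtime image as the one the bootstrap state machine should load from now on.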
*/ 4979 if ((error = wpi_nic_lock(sc)) != 0) 4980 return error; 4981 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4982 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4983 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4984 dma->paddr + WPI_FW_DATA_MAXSZ); 4985 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4986 WPI_FW_UPDATED | fw->main.textsz); 4987 wpi_nic_unlock(sc); 4988 4989 return 0; 4990 } 4991 4992 static int 4993 wpi_read_firmware(struct wpi_softc *sc) 4994 { 4995 const struct firmware *fp; 4996 struct wpi_fw_info *fw = &sc->fw; 4997 const struct wpi_firmware_hdr *hdr; 4998 int error; 4999 5000 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5001 5002 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5003 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5004 5005 WPI_UNLOCK(sc); 5006 fp = firmware_get(WPI_FW_NAME); 5007 WPI_LOCK(sc); 5008 5009 if (fp == NULL) { 5010 device_printf(sc->sc_dev, 5011 "could not load firmware image '%s'\n", WPI_FW_NAME); 5012 return EINVAL; 5013 } 5014 5015 sc->fw_fp = fp; 5016 5017 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5018 device_printf(sc->sc_dev, 5019 "firmware file too short: %zu bytes\n", fp->datasize); 5020 error = EINVAL; 5021 goto fail; 5022 } 5023 5024 fw->size = fp->datasize; 5025 fw->data = (const uint8_t *)fp->data; 5026 5027 /* Extract firmware header information. */ 5028 hdr = (const struct wpi_firmware_hdr *)fw->data; 5029 5030 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5031 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5032 5033 fw->main.textsz = le32toh(hdr->rtextsz); 5034 fw->main.datasz = le32toh(hdr->rdatasz); 5035 fw->init.textsz = le32toh(hdr->itextsz); 5036 fw->init.datasz = le32toh(hdr->idatasz); 5037 fw->boot.textsz = le32toh(hdr->btextsz); 5038 fw->boot.datasz = 0; 5039 5040 /* Sanity-check firmware header. */ 5041 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5042 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5043 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5044 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5045 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5046 (fw->boot.textsz & 3) != 0) { 5047 device_printf(sc->sc_dev, "invalid firmware header\n"); 5048 error = EINVAL; 5049 goto fail; 5050 } 5051 5052 /* Check that all firmware sections fit. */ 5053 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5054 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5055 device_printf(sc->sc_dev, 5056 "firmware file too short: %zu bytes\n", fw->size); 5057 error = EINVAL; 5058 goto fail; 5059 } 5060 5061 /* Get pointers to firmware sections. 
*/ 5062 fw->main.text = (const uint8_t *)(hdr + 1); 5063 fw->main.data = fw->main.text + fw->main.textsz; 5064 fw->init.text = fw->main.data + fw->main.datasz; 5065 fw->init.data = fw->init.text + fw->init.textsz; 5066 fw->boot.text = fw->init.data + fw->init.datasz; 5067 5068 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5069 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5070 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5071 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5072 fw->main.textsz, fw->main.datasz, 5073 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5074 5075 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5076 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5077 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5078 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5079 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5080 5081 return 0; 5082 5083 fail: wpi_unload_firmware(sc); 5084 return error; 5085 } 5086 5087 /** 5088 * Free the referenced firmware image 5089 */ 5090 static void 5091 wpi_unload_firmware(struct wpi_softc *sc) 5092 { 5093 if (sc->fw_fp != NULL) { 5094 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5095 sc->fw_fp = NULL; 5096 } 5097 } 5098 5099 static int 5100 wpi_clock_wait(struct wpi_softc *sc) 5101 { 5102 int ntries; 5103 5104 /* Set "initialization complete" bit. */ 5105 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5106 5107 /* Wait for clock stabilization. */ 5108 for (ntries = 0; ntries < 2500; ntries++) { 5109 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5110 return 0; 5111 DELAY(100); 5112 } 5113 device_printf(sc->sc_dev, 5114 "%s: timeout waiting for clock stabilization\n", __func__); 5115 5116 return ETIMEDOUT; 5117 } 5118 5119 static int 5120 wpi_apm_init(struct wpi_softc *sc) 5121 { 5122 uint32_t reg; 5123 int error; 5124 5125 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5126 5127 /* Disable L0s exit timer (NMI bug workaround). */ 5128 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5129 /* Don't wait for ICH L0s (ICH bug workaround). */ 5130 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5131 5132 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5133 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5134 5135 /* Retrieve PCIe Active State Power Management (ASPM). */ 5136 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 5137 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5138 if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ 5139 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5140 else 5141 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5142 5143 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5144 5145 /* Wait for clock stabilization before accessing prph. */ 5146 if ((error = wpi_clock_wait(sc)) != 0) 5147 return error; 5148 5149 if ((error = wpi_nic_lock(sc)) != 0) 5150 return error; 5151 /* Cleanup. */ 5152 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5153 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5154 5155 /* Enable DMA and BSM (Bootstrap State Machine). */ 5156 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5157 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5158 DELAY(20); 5159 /* Disable L1-Active. 
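* Part of the same PCIe power-management workarounds as the ASPM handling above.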
*/ 5160 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5161 wpi_nic_unlock(sc); 5162 5163 return 0; 5164 } 5165 5166 static void 5167 wpi_apm_stop_master(struct wpi_softc *sc) 5168 { 5169 int ntries; 5170 5171 /* Stop busmaster DMA activity. */ 5172 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5173 5174 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5175 WPI_GP_CNTRL_MAC_PS) 5176 return; /* Already asleep. */ 5177 5178 for (ntries = 0; ntries < 100; ntries++) { 5179 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5180 return; 5181 DELAY(10); 5182 } 5183 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5184 __func__); 5185 } 5186 5187 static void 5188 wpi_apm_stop(struct wpi_softc *sc) 5189 { 5190 wpi_apm_stop_master(sc); 5191 5192 /* Reset the entire device. */ 5193 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5194 DELAY(10); 5195 /* Clear "initialization complete" bit. */ 5196 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5197 } 5198 5199 static void 5200 wpi_nic_config(struct wpi_softc *sc) 5201 { 5202 uint32_t rev; 5203 5204 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5205 5206 /* voodoo from the Linux "driver".. */ 5207 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5208 if ((rev & 0xc0) == 0x40) 5209 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5210 else if (!(rev & 0x80)) 5211 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5212 5213 if (sc->cap == 0x80) 5214 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5215 5216 if ((sc->rev & 0xf0) == 0xd0) 5217 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5218 else 5219 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5220 5221 if (sc->type > 1) 5222 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5223 } 5224 5225 static int 5226 wpi_hw_init(struct wpi_softc *sc) 5227 { 5228 uint8_t chnl; 5229 int ntries, error; 5230 5231 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5232 5233 /* Clear pending interrupts. */ 5234 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5235 5236 if ((error = wpi_apm_init(sc)) != 0) { 5237 device_printf(sc->sc_dev, 5238 "%s: could not power ON adapter, error %d\n", __func__, 5239 error); 5240 return error; 5241 } 5242 5243 /* Select VMAIN power source. */ 5244 if ((error = wpi_nic_lock(sc)) != 0) 5245 return error; 5246 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5247 wpi_nic_unlock(sc); 5248 /* Spin until VMAIN gets selected. */ 5249 for (ntries = 0; ntries < 5000; ntries++) { 5250 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5251 break; 5252 DELAY(10); 5253 } 5254 if (ntries == 5000) { 5255 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5256 return ETIMEDOUT; 5257 } 5258 5259 /* Perform adapter initialization. */ 5260 wpi_nic_config(sc); 5261 5262 /* Initialize RX ring. */ 5263 if ((error = wpi_nic_lock(sc)) != 0) 5264 return error; 5265 /* Set physical address of RX ring. */ 5266 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5267 /* Set physical address of RX read pointer. */ 5268 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5269 offsetof(struct wpi_shared, next)); 5270 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5271 /* Enable RX. 
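* Enable the RX DMA engine with status write-back, use the ring size (log2) configured at allocation time, and route RX interrupts to the host with a short coalescing timeout.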
*/ 5272 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5273 WPI_FH_RX_CONFIG_DMA_ENA | 5274 WPI_FH_RX_CONFIG_RDRBD_ENA | 5275 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5276 WPI_FH_RX_CONFIG_MAXFRAG | 5277 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5278 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5279 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5280 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5281 wpi_nic_unlock(sc); 5282 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5283 5284 /* Initialize TX rings. */ 5285 if ((error = wpi_nic_lock(sc)) != 0) 5286 return error; 5287 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5288 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5289 /* Enable all 6 TX rings. */ 5290 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5291 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5292 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5293 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5294 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5295 /* Set physical address of TX rings. */ 5296 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5297 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5298 5299 /* Enable all DMA channels. */ 5300 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5301 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5302 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5303 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5304 } 5305 wpi_nic_unlock(sc); 5306 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5307 5308 /* Clear "radio off" and "commands blocked" bits. */ 5309 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5310 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5311 5312 /* Clear pending interrupts. */ 5313 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5314 /* Enable interrupts. */ 5315 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5316 5317 /* _Really_ make sure "radio off" bit is cleared! */ 5318 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5319 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5320 5321 if ((error = wpi_load_firmware(sc)) != 0) { 5322 device_printf(sc->sc_dev, 5323 "%s: could not load firmware, error %d\n", __func__, 5324 error); 5325 return error; 5326 } 5327 /* Wait at most one second for firmware alive notification. */ 5328 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5329 device_printf(sc->sc_dev, 5330 "%s: timeout waiting for adapter to initialize, error %d\n", 5331 __func__, error); 5332 return error; 5333 } 5334 5335 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5336 5337 /* Do post-firmware initialization. */ 5338 return wpi_post_alive(sc); 5339 } 5340 5341 static void 5342 wpi_hw_stop(struct wpi_softc *sc) 5343 { 5344 uint8_t chnl, qid; 5345 int ntries; 5346 5347 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5348 5349 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5350 wpi_nic_lock(sc); 5351 5352 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5353 5354 /* Disable interrupts. */ 5355 WPI_WRITE(sc, WPI_INT_MASK, 0); 5356 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5357 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5358 5359 /* Make sure we no longer hold the NIC lock. */ 5360 wpi_nic_unlock(sc); 5361 5362 if (wpi_nic_lock(sc) == 0) { 5363 /* Stop TX scheduler. */ 5364 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5365 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5366 5367 /* Stop all DMA channels. 
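* Clear each channel's config register, then poll for up to 2ms per channel for it to report idle.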
*/ 5368 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5369 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5370 for (ntries = 0; ntries < 200; ntries++) { 5371 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5372 WPI_FH_TX_STATUS_IDLE(chnl)) 5373 break; 5374 DELAY(10); 5375 } 5376 } 5377 wpi_nic_unlock(sc); 5378 } 5379 5380 /* Stop RX ring. */ 5381 wpi_reset_rx_ring(sc); 5382 5383 /* Reset all TX rings. */ 5384 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 5385 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5386 5387 if (wpi_nic_lock(sc) == 0) { 5388 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5389 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5390 wpi_nic_unlock(sc); 5391 } 5392 DELAY(5); 5393 /* Power OFF adapter. */ 5394 wpi_apm_stop(sc); 5395 } 5396 5397 static void 5398 wpi_radio_on(void *arg0, int pending) 5399 { 5400 struct wpi_softc *sc = arg0; 5401 struct ieee80211com *ic = &sc->sc_ic; 5402 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5403 5404 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5405 5406 WPI_LOCK(sc); 5407 callout_stop(&sc->watchdog_rfkill); 5408 WPI_UNLOCK(sc); 5409 5410 if (vap != NULL) 5411 ieee80211_init(vap); 5412 } 5413 5414 static void 5415 wpi_radio_off(void *arg0, int pending) 5416 { 5417 struct wpi_softc *sc = arg0; 5418 struct ieee80211com *ic = &sc->sc_ic; 5419 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5420 5421 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5422 5423 ieee80211_notify_radio(ic, 0); 5424 wpi_stop(sc); 5425 if (vap != NULL) 5426 ieee80211_stop(vap); 5427 5428 WPI_LOCK(sc); 5429 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5430 WPI_UNLOCK(sc); 5431 } 5432 5433 static int 5434 wpi_init(struct wpi_softc *sc) 5435 { 5436 int error = 0; 5437 5438 WPI_LOCK(sc); 5439 5440 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5441 5442 if (sc->sc_running != 0) 5443 goto end; 5444 5445 /* Check that the radio is not disabled by hardware switch. */ 5446 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5447 device_printf(sc->sc_dev, 5448 "RF switch: radio disabled (%s)\n", __func__); 5449 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5450 sc); 5451 error = EINPROGRESS; 5452 goto end; 5453 } 5454 5455 /* Read firmware images from the filesystem. */ 5456 if ((error = wpi_read_firmware(sc)) != 0) { 5457 device_printf(sc->sc_dev, 5458 "%s: could not read firmware, error %d\n", __func__, 5459 error); 5460 goto end; 5461 } 5462 5463 sc->sc_running = 1; 5464 5465 /* Initialize hardware and upload firmware. */ 5466 error = wpi_hw_init(sc); 5467 wpi_unload_firmware(sc); 5468 if (error != 0) { 5469 device_printf(sc->sc_dev, 5470 "%s: could not initialize hardware, error %d\n", __func__, 5471 error); 5472 goto fail; 5473 } 5474 5475 /* Configure adapter now that it is ready. 
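* wpi_config() sets the power saving level, bluetooth coexistence, the initial RXON and the MRR (rate scaling) tables.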
*/ 5476 if ((error = wpi_config(sc)) != 0) { 5477 device_printf(sc->sc_dev, 5478 "%s: could not configure device, error %d\n", __func__, 5479 error); 5480 goto fail; 5481 } 5482 5483 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5484 5485 WPI_UNLOCK(sc); 5486 5487 return 0; 5488 5489 fail: wpi_stop_locked(sc); 5490 5491 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5492 WPI_UNLOCK(sc); 5493 5494 return error; 5495 } 5496 5497 static void 5498 wpi_stop_locked(struct wpi_softc *sc) 5499 { 5500 5501 WPI_LOCK_ASSERT(sc); 5502 5503 if (sc->sc_running == 0) 5504 return; 5505 5506 WPI_TX_LOCK(sc); 5507 WPI_TXQ_LOCK(sc); 5508 sc->sc_running = 0; 5509 WPI_TXQ_UNLOCK(sc); 5510 WPI_TX_UNLOCK(sc); 5511 5512 WPI_TXQ_STATE_LOCK(sc); 5513 callout_stop(&sc->tx_timeout); 5514 WPI_TXQ_STATE_UNLOCK(sc); 5515 5516 WPI_RXON_LOCK(sc); 5517 callout_stop(&sc->scan_timeout); 5518 callout_stop(&sc->calib_to); 5519 WPI_RXON_UNLOCK(sc); 5520 5521 /* Power OFF hardware. */ 5522 wpi_hw_stop(sc); 5523 } 5524 5525 static void 5526 wpi_stop(struct wpi_softc *sc) 5527 { 5528 WPI_LOCK(sc); 5529 wpi_stop_locked(sc); 5530 WPI_UNLOCK(sc); 5531 } 5532 5533 /* 5534 * Callback from net80211 to start a scan. 5535 */ 5536 static void 5537 wpi_scan_start(struct ieee80211com *ic) 5538 { 5539 struct wpi_softc *sc = ic->ic_softc; 5540 5541 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5542 } 5543 5544 /* 5545 * Callback from net80211 to terminate a scan. 5546 */ 5547 static void 5548 wpi_scan_end(struct ieee80211com *ic) 5549 { 5550 struct wpi_softc *sc = ic->ic_softc; 5551 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5552 5553 if (vap->iv_state == IEEE80211_S_RUN) 5554 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5555 } 5556 5557 /** 5558 * Called by the net80211 framework to indicate to the driver 5559 * that the channel should be changed. 5560 */ 5561 static void 5562 wpi_set_channel(struct ieee80211com *ic) 5563 { 5564 const struct ieee80211_channel *c = ic->ic_curchan; 5565 struct wpi_softc *sc = ic->ic_softc; 5566 int error; 5567 5568 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5569 5570 WPI_LOCK(sc); 5571 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5572 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5573 WPI_UNLOCK(sc); 5574 WPI_TX_LOCK(sc); 5575 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5576 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5577 WPI_TX_UNLOCK(sc); 5578 5579 /* 5580 * Only need to set the channel in Monitor mode. AP scanning and auth 5581 * are already taken care of by their respective firmware commands. 5582 */ 5583 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5584 WPI_RXON_LOCK(sc); 5585 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5586 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5587 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5588 WPI_RXON_24GHZ); 5589 } else { 5590 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5591 WPI_RXON_24GHZ); 5592 } 5593 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5594 device_printf(sc->sc_dev, 5595 "%s: error %d setting channel\n", __func__, 5596 error); 5597 WPI_RXON_UNLOCK(sc); 5598 } 5599 } 5600 5601 /** 5602 * Called by net80211 to indicate that we need to scan the current 5603 * channel. The channel will have previously been set via the wpi_set_channel 5604 * callback.
5605 */ 5606 static void 5607 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5608 { 5609 struct ieee80211vap *vap = ss->ss_vap; 5610 struct ieee80211com *ic = vap->iv_ic; 5611 struct wpi_softc *sc = ic->ic_softc; 5612 int error; 5613 5614 WPI_RXON_LOCK(sc); 5615 error = wpi_scan(sc, ic->ic_curchan); 5616 WPI_RXON_UNLOCK(sc); 5617 if (error != 0) 5618 ieee80211_cancel_scan(vap); 5619 } 5620 5621 /** 5622 * Called by the net80211 framework to indicate that 5623 * the minimum dwell time has been met and the scan should be terminated. 5624 * We don't actually terminate the scan, as the firmware will notify 5625 * us when it's finished and we have no way to interrupt it. 5626 */ 5627 static void 5628 wpi_scan_mindwell(struct ieee80211_scan_state *ss) 5629 { 5630 /* NB: don't try to abort scan; wait for firmware to finish */ 5631 } 5632 5633 static void 5634 wpi_hw_reset(void *arg, int pending) 5635 { 5636 struct wpi_softc *sc = arg; 5637 struct ieee80211com *ic = &sc->sc_ic; 5638 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5639 5640 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5641 5642 ieee80211_notify_radio(ic, 0); 5643 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN)) 5644 ieee80211_cancel_scan(vap); 5645 5646 wpi_stop(sc); 5647 if (vap != NULL) { 5648 ieee80211_stop(vap); 5649 ieee80211_init(vap); 5650 } 5651 } 5652