1 /*- 2 * Copyright (c) 2006,2007 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Benjamin Close <Benjamin.Close@clearchain.com> 5 * Copyright (c) 2015 Andriy Voskoboinyk <avos@FreeBSD.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include <sys/cdefs.h> 21 /* 22 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters. 23 * 24 * The 3945ABG network adapter doesn't use traditional hardware as 25 * many other adaptors do. Instead at run time the eeprom is set into a known 26 * state and told to load boot firmware. The boot firmware loads an init and a 27 * main binary firmware image into SRAM on the card via DMA. 28 * Once the firmware is loaded, the driver/hw then 29 * communicate by way of circular dma rings via the SRAM to the firmware. 30 * 31 * There is 6 memory rings. 1 command ring, 1 rx data ring & 4 tx data rings. 32 * The 4 tx data rings allow for prioritization QoS. 33 * 34 * The rx data ring consists of 32 dma buffers. Two registers are used to 35 * indicate where in the ring the driver and the firmware are up to. The 36 * driver sets the initial read index (reg1) and the initial write index (reg2), 37 * the firmware updates the read index (reg1) on rx of a packet and fires an 38 * interrupt. The driver then processes the buffers starting at reg1 indicating 39 * to the firmware which buffers have been accessed by updating reg2. At the 40 * same time allocating new memory for the processed buffer. 41 * 42 * A similar thing happens with the tx rings. The difference is the firmware 43 * stop processing buffers once the queue is full and until confirmation 44 * of a successful transmition (tx_done) has occurred. 45 * 46 * The command ring operates in the same manner as the tx queues. 47 * 48 * All communication direct to the card (ie eeprom) is classed as Stage1 49 * communication 50 * 51 * All communication via the firmware to the card is classed as State2. 52 * The firmware consists of 2 parts. A bootstrap firmware and a runtime 53 * firmware. The bootstrap firmware and runtime firmware are loaded 54 * from host memory via dma to the card then told to execute. From this point 55 * on the majority of communications between the driver and the card goes 56 * via the firmware. 
57 */ 58 59 #include "opt_wlan.h" 60 #include "opt_wpi.h" 61 62 #include <sys/param.h> 63 #include <sys/sysctl.h> 64 #include <sys/sockio.h> 65 #include <sys/mbuf.h> 66 #include <sys/kernel.h> 67 #include <sys/socket.h> 68 #include <sys/systm.h> 69 #include <sys/malloc.h> 70 #include <sys/queue.h> 71 #include <sys/taskqueue.h> 72 #include <sys/module.h> 73 #include <sys/bus.h> 74 #include <sys/endian.h> 75 #include <sys/linker.h> 76 #include <sys/firmware.h> 77 78 #include <machine/bus.h> 79 #include <machine/resource.h> 80 #include <sys/rman.h> 81 82 #include <dev/pci/pcireg.h> 83 #include <dev/pci/pcivar.h> 84 85 #include <net/bpf.h> 86 #include <net/if.h> 87 #include <net/if_var.h> 88 #include <net/if_arp.h> 89 #include <net/ethernet.h> 90 #include <net/if_dl.h> 91 #include <net/if_media.h> 92 #include <net/if_types.h> 93 94 #include <netinet/in.h> 95 #include <netinet/in_systm.h> 96 #include <netinet/in_var.h> 97 #include <netinet/if_ether.h> 98 #include <netinet/ip.h> 99 100 #include <net80211/ieee80211_var.h> 101 #include <net80211/ieee80211_radiotap.h> 102 #include <net80211/ieee80211_regdomain.h> 103 #include <net80211/ieee80211_ratectl.h> 104 105 #include <dev/wpi/if_wpireg.h> 106 #include <dev/wpi/if_wpivar.h> 107 #include <dev/wpi/if_wpi_debug.h> 108 109 struct wpi_ident { 110 uint16_t vendor; 111 uint16_t device; 112 uint16_t subdevice; 113 const char *name; 114 }; 115 116 static const struct wpi_ident wpi_ident_table[] = { 117 /* The below entries support ABG regardless of the subid */ 118 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 119 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 120 /* The below entries only support BG */ 121 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 122 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 123 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0, 0, 0, NULL } 126 }; 127 128 static int wpi_probe(device_t); 129 static int wpi_attach(device_t); 130 static void wpi_radiotap_attach(struct wpi_softc *); 131 static void wpi_sysctlattach(struct wpi_softc *); 132 static void wpi_init_beacon(struct wpi_vap *); 133 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 134 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 135 const uint8_t [IEEE80211_ADDR_LEN], 136 const uint8_t [IEEE80211_ADDR_LEN]); 137 static void wpi_vap_delete(struct ieee80211vap *); 138 static int wpi_detach(device_t); 139 static int wpi_shutdown(device_t); 140 static int wpi_suspend(device_t); 141 static int wpi_resume(device_t); 142 static int wpi_nic_lock(struct wpi_softc *); 143 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 144 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 145 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 146 void **, bus_size_t, bus_size_t); 147 static void wpi_dma_contig_free(struct wpi_dma_info *); 148 static int wpi_alloc_shared(struct wpi_softc *); 149 static void wpi_free_shared(struct wpi_softc *); 150 static int wpi_alloc_fwmem(struct wpi_softc *); 151 static void wpi_free_fwmem(struct wpi_softc *); 152 static int wpi_alloc_rx_ring(struct wpi_softc *); 153 static void wpi_update_rx_ring(struct wpi_softc *); 154 static void wpi_update_rx_ring_ps(struct wpi_softc *); 155 static void wpi_reset_rx_ring(struct wpi_softc *); 156 static void wpi_free_rx_ring(struct wpi_softc *); 157 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 158 uint8_t); 159 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 160 static void wpi_update_tx_ring_ps(struct wpi_softc *, 161 struct wpi_tx_ring *); 162 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 163 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 164 static int wpi_read_eeprom(struct wpi_softc *, 165 uint8_t macaddr[IEEE80211_ADDR_LEN]); 166 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 167 static void wpi_read_eeprom_band(struct wpi_softc *, uint8_t, int, int *, 168 struct ieee80211_channel[]); 169 static int wpi_read_eeprom_channels(struct wpi_softc *, uint8_t); 170 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 171 struct ieee80211_channel *); 172 static void wpi_getradiocaps(struct ieee80211com *, int, int *, 173 struct ieee80211_channel[]); 174 static int wpi_setregdomain(struct ieee80211com *, 175 struct ieee80211_regdomain *, int, 176 struct ieee80211_channel[]); 177 static int wpi_read_eeprom_group(struct wpi_softc *, uint8_t); 178 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 179 const uint8_t mac[IEEE80211_ADDR_LEN]); 180 static void wpi_node_free(struct ieee80211_node *); 181 static void wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 182 const struct ieee80211_rx_stats *, 183 int, int); 184 static void wpi_restore_node(void *, struct ieee80211_node *); 185 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 186 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 187 static void wpi_calib_timeout(void *); 188 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 189 struct wpi_rx_data *); 190 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 191 struct wpi_rx_data *); 192 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 193 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 194 static void wpi_notif_intr(struct wpi_softc *); 195 static void wpi_wakeup_intr(struct wpi_softc *); 196 #ifdef WPI_DEBUG 197 static void wpi_debug_registers(struct wpi_softc *); 198 #endif 199 static void wpi_fatal_intr(struct wpi_softc *); 200 static void wpi_intr(void *); 201 static void wpi_free_txfrags(struct wpi_softc *, uint16_t); 202 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 203 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 204 struct ieee80211_node *); 205 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 206 struct ieee80211_node *, 207 const struct ieee80211_bpf_params *); 208 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 209 const struct ieee80211_bpf_params *); 210 static int wpi_transmit(struct ieee80211com *, struct mbuf *); 211 static void wpi_watchdog_rfkill(void *); 212 static void wpi_scan_timeout(void *); 213 static void wpi_tx_timeout(void *); 214 static void wpi_parent(struct ieee80211com *); 215 static int wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t, 216 int); 217 static int wpi_mrr_setup(struct wpi_softc *); 218 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 219 static int wpi_add_broadcast_node(struct wpi_softc *, int); 220 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 221 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 222 static int wpi_updateedca(struct ieee80211com *); 223 static void wpi_set_promisc(struct wpi_softc *); 224 
static void wpi_update_promisc(struct ieee80211com *); 225 static void wpi_update_mcast(struct ieee80211com *); 226 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 227 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 228 static void wpi_power_calibration(struct wpi_softc *); 229 static int wpi_set_txpower(struct wpi_softc *, int); 230 static int wpi_get_power_index(struct wpi_softc *, 231 struct wpi_power_group *, uint8_t, int, int); 232 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 233 static int wpi_send_btcoex(struct wpi_softc *); 234 static int wpi_send_rxon(struct wpi_softc *, int, int); 235 static int wpi_config(struct wpi_softc *); 236 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 237 struct ieee80211_channel *, uint8_t); 238 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 239 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 240 struct ieee80211_channel *); 241 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 242 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 243 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 244 static int wpi_config_beacon(struct wpi_vap *); 245 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 246 static void wpi_update_beacon(struct ieee80211vap *, int); 247 static void wpi_newassoc(struct ieee80211_node *, int); 248 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 249 static int wpi_load_key(struct ieee80211_node *, 250 const struct ieee80211_key *); 251 static void wpi_load_key_cb(void *, struct ieee80211_node *); 252 static int wpi_set_global_keys(struct ieee80211_node *); 253 static int wpi_del_key(struct ieee80211_node *, 254 const struct ieee80211_key *); 255 static void wpi_del_key_cb(void *, struct ieee80211_node *); 256 static int wpi_process_key(struct ieee80211vap *, 257 const struct ieee80211_key *, int); 258 static int wpi_key_set(struct ieee80211vap *, 259 const struct ieee80211_key *); 260 static int wpi_key_delete(struct ieee80211vap *, 261 const struct ieee80211_key *); 262 static int wpi_post_alive(struct wpi_softc *); 263 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, 264 uint32_t); 265 static int wpi_load_firmware(struct wpi_softc *); 266 static int wpi_read_firmware(struct wpi_softc *); 267 static void wpi_unload_firmware(struct wpi_softc *); 268 static int wpi_clock_wait(struct wpi_softc *); 269 static int wpi_apm_init(struct wpi_softc *); 270 static void wpi_apm_stop_master(struct wpi_softc *); 271 static void wpi_apm_stop(struct wpi_softc *); 272 static void wpi_nic_config(struct wpi_softc *); 273 static int wpi_hw_init(struct wpi_softc *); 274 static void wpi_hw_stop(struct wpi_softc *); 275 static void wpi_radio_on(void *, int); 276 static void wpi_radio_off(void *, int); 277 static int wpi_init(struct wpi_softc *); 278 static void wpi_stop_locked(struct wpi_softc *); 279 static void wpi_stop(struct wpi_softc *); 280 static void wpi_scan_start(struct ieee80211com *); 281 static void wpi_scan_end(struct ieee80211com *); 282 static void wpi_set_channel(struct ieee80211com *); 283 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 284 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 285 286 static device_method_t wpi_methods[] = { 287 /* Device interface */ 288 DEVMETHOD(device_probe, wpi_probe), 289 DEVMETHOD(device_attach, wpi_attach), 290 DEVMETHOD(device_detach, wpi_detach), 291 
DEVMETHOD(device_shutdown, wpi_shutdown), 292 DEVMETHOD(device_suspend, wpi_suspend), 293 DEVMETHOD(device_resume, wpi_resume), 294 295 DEVMETHOD_END 296 }; 297 298 static driver_t wpi_driver = { 299 "wpi", 300 wpi_methods, 301 sizeof (struct wpi_softc) 302 }; 303 304 DRIVER_MODULE(wpi, pci, wpi_driver, NULL, NULL); 305 306 MODULE_VERSION(wpi, 1); 307 308 MODULE_DEPEND(wpi, pci, 1, 1, 1); 309 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 310 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 311 312 static int 313 wpi_probe(device_t dev) 314 { 315 const struct wpi_ident *ident; 316 317 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 318 if (pci_get_vendor(dev) == ident->vendor && 319 pci_get_device(dev) == ident->device) { 320 device_set_desc(dev, ident->name); 321 return (BUS_PROBE_DEFAULT); 322 } 323 } 324 return ENXIO; 325 } 326 327 static int 328 wpi_attach(device_t dev) 329 { 330 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 331 struct ieee80211com *ic; 332 uint8_t i; 333 int error, rid; 334 #ifdef WPI_DEBUG 335 int supportsa = 1; 336 const struct wpi_ident *ident; 337 #endif 338 339 sc->sc_dev = dev; 340 341 #ifdef WPI_DEBUG 342 error = resource_int_value(device_get_name(sc->sc_dev), 343 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 344 if (error != 0) 345 sc->sc_debug = 0; 346 #else 347 sc->sc_debug = 0; 348 #endif 349 350 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 351 352 /* 353 * Get the offset of the PCI Express Capability Structure in PCI 354 * Configuration Space. 355 */ 356 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 357 if (error != 0) { 358 device_printf(dev, "PCIe capability structure not found!\n"); 359 return error; 360 } 361 362 /* 363 * Some card's only support 802.11b/g not a, check to see if 364 * this is one such card. A 0x0 in the subdevice table indicates 365 * the entire subdevice range is to be ignored. 366 */ 367 #ifdef WPI_DEBUG 368 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 369 if (ident->subdevice && 370 pci_get_subdevice(dev) == ident->subdevice) { 371 supportsa = 0; 372 break; 373 } 374 } 375 #endif 376 377 /* Clear device-specific "PCI retry timeout" register (41h). */ 378 pci_write_config(dev, 0x41, 0, 1); 379 380 /* Enable bus-mastering. */ 381 pci_enable_busmaster(dev); 382 383 rid = PCIR_BAR(0); 384 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 385 RF_ACTIVE); 386 if (sc->mem == NULL) { 387 device_printf(dev, "can't map mem space\n"); 388 return ENOMEM; 389 } 390 sc->sc_st = rman_get_bustag(sc->mem); 391 sc->sc_sh = rman_get_bushandle(sc->mem); 392 393 rid = 1; 394 if (pci_alloc_msi(dev, &rid) == 0) 395 rid = 1; 396 else 397 rid = 0; 398 /* Install interrupt handler. */ 399 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 400 (rid != 0 ? 0 : RF_SHAREABLE)); 401 if (sc->irq == NULL) { 402 device_printf(dev, "can't map interrupt\n"); 403 error = ENOMEM; 404 goto fail; 405 } 406 407 WPI_LOCK_INIT(sc); 408 WPI_TX_LOCK_INIT(sc); 409 WPI_RXON_LOCK_INIT(sc); 410 WPI_NT_LOCK_INIT(sc); 411 WPI_TXQ_LOCK_INIT(sc); 412 WPI_TXQ_STATE_LOCK_INIT(sc); 413 414 /* Allocate DMA memory for firmware transfers. */ 415 if ((error = wpi_alloc_fwmem(sc)) != 0) { 416 device_printf(dev, 417 "could not allocate memory for firmware, error %d\n", 418 error); 419 goto fail; 420 } 421 422 /* Allocate shared page. 
*/ 423 if ((error = wpi_alloc_shared(sc)) != 0) { 424 device_printf(dev, "could not allocate shared page\n"); 425 goto fail; 426 } 427 428 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 429 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 430 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 431 device_printf(dev, 432 "could not allocate TX ring %d, error %d\n", i, 433 error); 434 goto fail; 435 } 436 } 437 438 /* Allocate RX ring. */ 439 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 440 device_printf(dev, "could not allocate RX ring, error %d\n", 441 error); 442 goto fail; 443 } 444 445 /* Clear pending interrupts. */ 446 WPI_WRITE(sc, WPI_INT, 0xffffffff); 447 448 ic = &sc->sc_ic; 449 ic->ic_softc = sc; 450 ic->ic_name = device_get_nameunit(dev); 451 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 452 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 453 454 /* Set device capabilities. */ 455 ic->ic_caps = 456 IEEE80211_C_STA /* station mode supported */ 457 | IEEE80211_C_IBSS /* IBSS mode supported */ 458 | IEEE80211_C_HOSTAP /* Host access point mode */ 459 | IEEE80211_C_MONITOR /* monitor mode supported */ 460 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 461 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 462 | IEEE80211_C_TXFRAG /* handle tx frags */ 463 | IEEE80211_C_TXPMGT /* tx power management */ 464 | IEEE80211_C_SHSLOT /* short slot time supported */ 465 | IEEE80211_C_WPA /* 802.11i */ 466 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 467 | IEEE80211_C_WME /* 802.11e */ 468 | IEEE80211_C_PMGT /* Station-side power mgmt */ 469 ; 470 471 ic->ic_cryptocaps = 472 IEEE80211_CRYPTO_AES_CCM; 473 474 /* 475 * Read in the eeprom and also setup the channels for 476 * net80211. We don't set the rates as net80211 does this for us 477 */ 478 if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) { 479 device_printf(dev, "could not read EEPROM, error %d\n", 480 error); 481 goto fail; 482 } 483 484 #ifdef WPI_DEBUG 485 if (bootverbose) { 486 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 487 sc->domain); 488 device_printf(sc->sc_dev, "Hardware Type: %c\n", 489 sc->type > 1 ? 'B': '?'); 490 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 491 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 492 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 493 supportsa ? "does" : "does not"); 494 495 /* XXX hw_config uses the PCIDEV for the Hardware rev. Must 496 check what sc->rev really represents - benjsc 20070615 */ 497 } 498 #endif 499 500 ieee80211_ifattach(ic); 501 ic->ic_vap_create = wpi_vap_create; 502 ic->ic_vap_delete = wpi_vap_delete; 503 ic->ic_parent = wpi_parent; 504 ic->ic_raw_xmit = wpi_raw_xmit; 505 ic->ic_transmit = wpi_transmit; 506 ic->ic_node_alloc = wpi_node_alloc; 507 sc->sc_node_free = ic->ic_node_free; 508 ic->ic_node_free = wpi_node_free; 509 ic->ic_wme.wme_update = wpi_updateedca; 510 ic->ic_update_promisc = wpi_update_promisc; 511 ic->ic_update_mcast = wpi_update_mcast; 512 ic->ic_newassoc = wpi_newassoc; 513 ic->ic_scan_start = wpi_scan_start; 514 ic->ic_scan_end = wpi_scan_end; 515 ic->ic_set_channel = wpi_set_channel; 516 ic->ic_scan_curchan = wpi_scan_curchan; 517 ic->ic_scan_mindwell = wpi_scan_mindwell; 518 ic->ic_getradiocaps = wpi_getradiocaps; 519 ic->ic_setregdomain = wpi_setregdomain; 520 521 sc->sc_update_rx_ring = wpi_update_rx_ring; 522 sc->sc_update_tx_ring = wpi_update_tx_ring; 523 524 wpi_radiotap_attach(sc); 525 526 /* Setup Tx status flags (constant). 
*/ 527 sc->sc_txs.flags = IEEE80211_RATECTL_STATUS_PKTLEN | 528 IEEE80211_RATECTL_STATUS_SHORT_RETRY | 529 IEEE80211_RATECTL_STATUS_LONG_RETRY; 530 531 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 532 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 533 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 534 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 535 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 536 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 537 538 wpi_sysctlattach(sc); 539 540 /* 541 * Hook our interrupt after all initialization is complete. 542 */ 543 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 544 NULL, wpi_intr, sc, &sc->sc_ih); 545 if (error != 0) { 546 device_printf(dev, "can't establish interrupt, error %d\n", 547 error); 548 goto fail; 549 } 550 551 if (bootverbose) 552 ieee80211_announce(ic); 553 554 #ifdef WPI_DEBUG 555 if (sc->sc_debug & WPI_DEBUG_HW) 556 ieee80211_announce_channels(ic); 557 #endif 558 559 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 560 return 0; 561 562 fail: wpi_detach(dev); 563 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 564 return error; 565 } 566 567 /* 568 * Attach the interface to 802.11 radiotap. 569 */ 570 static void 571 wpi_radiotap_attach(struct wpi_softc *sc) 572 { 573 struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap; 574 struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap; 575 576 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 577 ieee80211_radiotap_attach(&sc->sc_ic, 578 &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT, 579 &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT); 580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 581 } 582 583 static void 584 wpi_sysctlattach(struct wpi_softc *sc) 585 { 586 #ifdef WPI_DEBUG 587 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 588 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 589 590 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 591 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 592 "control debugging printfs"); 593 #endif 594 } 595 596 static void 597 wpi_init_beacon(struct wpi_vap *wvp) 598 { 599 struct wpi_buf *bcn = &wvp->wv_bcbuf; 600 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 601 602 cmd->id = WPI_ID_BROADCAST; 603 cmd->ofdm_mask = 0xff; 604 cmd->cck_mask = 0x0f; 605 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 606 607 /* 608 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 609 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 610 */ 611 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 612 613 bcn->code = WPI_CMD_SET_BEACON; 614 bcn->ac = WPI_CMD_QUEUE_NUM; 615 bcn->size = sizeof(struct wpi_cmd_beacon); 616 } 617 618 static struct ieee80211vap * 619 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 620 enum ieee80211_opmode opmode, int flags, 621 const uint8_t bssid[IEEE80211_ADDR_LEN], 622 const uint8_t mac[IEEE80211_ADDR_LEN]) 623 { 624 struct wpi_vap *wvp; 625 struct ieee80211vap *vap; 626 627 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 628 return NULL; 629 630 wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO); 631 vap = &wvp->wv_vap; 632 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 633 634 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 635 WPI_VAP_LOCK_INIT(wvp); 636 wpi_init_beacon(wvp); 637 } 638 639 /* Override with driver methods. 
*/ 640 vap->iv_key_set = wpi_key_set; 641 vap->iv_key_delete = wpi_key_delete; 642 if (opmode == IEEE80211_M_IBSS) { 643 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 644 vap->iv_recv_mgmt = wpi_ibss_recv_mgmt; 645 } 646 wvp->wv_newstate = vap->iv_newstate; 647 vap->iv_newstate = wpi_newstate; 648 vap->iv_update_beacon = wpi_update_beacon; 649 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 650 651 ieee80211_ratectl_init(vap); 652 /* Complete setup. */ 653 ieee80211_vap_attach(vap, ieee80211_media_change, 654 ieee80211_media_status, mac); 655 ic->ic_opmode = opmode; 656 return vap; 657 } 658 659 static void 660 wpi_vap_delete(struct ieee80211vap *vap) 661 { 662 struct wpi_vap *wvp = WPI_VAP(vap); 663 struct wpi_buf *bcn = &wvp->wv_bcbuf; 664 enum ieee80211_opmode opmode = vap->iv_opmode; 665 666 ieee80211_ratectl_deinit(vap); 667 ieee80211_vap_detach(vap); 668 669 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 670 if (bcn->m != NULL) 671 m_freem(bcn->m); 672 673 WPI_VAP_LOCK_DESTROY(wvp); 674 } 675 676 free(wvp, M_80211_VAP); 677 } 678 679 static int 680 wpi_detach(device_t dev) 681 { 682 struct wpi_softc *sc = device_get_softc(dev); 683 struct ieee80211com *ic = &sc->sc_ic; 684 uint8_t qid; 685 686 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 687 688 if (ic->ic_vap_create == wpi_vap_create) { 689 ieee80211_draintask(ic, &sc->sc_radioon_task); 690 ieee80211_draintask(ic, &sc->sc_radiooff_task); 691 692 wpi_stop(sc); 693 694 callout_drain(&sc->watchdog_rfkill); 695 callout_drain(&sc->tx_timeout); 696 callout_drain(&sc->scan_timeout); 697 callout_drain(&sc->calib_to); 698 ieee80211_ifdetach(ic); 699 } 700 701 /* Uninstall interrupt handler. */ 702 if (sc->irq != NULL) { 703 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 704 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 705 sc->irq); 706 pci_release_msi(dev); 707 } 708 709 if (sc->txq[0].data_dmat) { 710 /* Free DMA resources. */ 711 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 712 wpi_free_tx_ring(sc, &sc->txq[qid]); 713 714 wpi_free_rx_ring(sc); 715 wpi_free_shared(sc); 716 } 717 718 if (sc->fw_dma.tag) 719 wpi_free_fwmem(sc); 720 721 if (sc->mem != NULL) 722 bus_release_resource(dev, SYS_RES_MEMORY, 723 rman_get_rid(sc->mem), sc->mem); 724 725 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 726 WPI_TXQ_STATE_LOCK_DESTROY(sc); 727 WPI_TXQ_LOCK_DESTROY(sc); 728 WPI_NT_LOCK_DESTROY(sc); 729 WPI_RXON_LOCK_DESTROY(sc); 730 WPI_TX_LOCK_DESTROY(sc); 731 WPI_LOCK_DESTROY(sc); 732 return 0; 733 } 734 735 static int 736 wpi_shutdown(device_t dev) 737 { 738 struct wpi_softc *sc = device_get_softc(dev); 739 740 wpi_stop(sc); 741 return 0; 742 } 743 744 static int 745 wpi_suspend(device_t dev) 746 { 747 struct wpi_softc *sc = device_get_softc(dev); 748 struct ieee80211com *ic = &sc->sc_ic; 749 750 ieee80211_suspend_all(ic); 751 return 0; 752 } 753 754 static int 755 wpi_resume(device_t dev) 756 { 757 struct wpi_softc *sc = device_get_softc(dev); 758 struct ieee80211com *ic = &sc->sc_ic; 759 760 /* Clear device-specific "PCI retry timeout" register (41h). */ 761 pci_write_config(dev, 0x41, 0, 1); 762 763 ieee80211_resume_all(ic); 764 return 0; 765 } 766 767 /* 768 * Grab exclusive access to NIC memory. 769 */ 770 static int 771 wpi_nic_lock(struct wpi_softc *sc) 772 { 773 int ntries; 774 775 /* Request exclusive access to NIC. */ 776 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 777 778 /* Spin until we actually get the lock. 
*/ 779 for (ntries = 0; ntries < 1000; ntries++) { 780 if ((WPI_READ(sc, WPI_GP_CNTRL) & 781 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 782 WPI_GP_CNTRL_MAC_ACCESS_ENA) 783 return 0; 784 DELAY(10); 785 } 786 787 device_printf(sc->sc_dev, "could not lock memory\n"); 788 789 return ETIMEDOUT; 790 } 791 792 /* 793 * Release lock on NIC memory. 794 */ 795 static __inline void 796 wpi_nic_unlock(struct wpi_softc *sc) 797 { 798 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 799 } 800 801 static __inline uint32_t 802 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 803 { 804 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 805 WPI_BARRIER_READ_WRITE(sc); 806 return WPI_READ(sc, WPI_PRPH_RDATA); 807 } 808 809 static __inline void 810 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 811 { 812 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 813 WPI_BARRIER_WRITE(sc); 814 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 815 } 816 817 static __inline void 818 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 819 { 820 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 821 } 822 823 static __inline void 824 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 825 { 826 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 827 } 828 829 static __inline void 830 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 831 const uint32_t *data, uint32_t count) 832 { 833 for (; count != 0; count--, data++, addr += 4) 834 wpi_prph_write(sc, addr, *data); 835 } 836 837 static __inline uint32_t 838 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 839 { 840 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 841 WPI_BARRIER_READ_WRITE(sc); 842 return WPI_READ(sc, WPI_MEM_RDATA); 843 } 844 845 static __inline void 846 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 847 int count) 848 { 849 for (; count > 0; count--, addr += 4) 850 *data++ = wpi_mem_read(sc, addr); 851 } 852 853 static int 854 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 855 { 856 uint8_t *out = data; 857 uint32_t val; 858 int error, ntries; 859 860 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 861 862 if ((error = wpi_nic_lock(sc)) != 0) 863 return error; 864 865 for (; count > 0; count -= 2, addr++) { 866 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 867 for (ntries = 0; ntries < 10; ntries++) { 868 val = WPI_READ(sc, WPI_EEPROM); 869 if (val & WPI_EEPROM_READ_VALID) 870 break; 871 DELAY(5); 872 } 873 if (ntries == 10) { 874 device_printf(sc->sc_dev, 875 "timeout reading ROM at 0x%x\n", addr); 876 return ETIMEDOUT; 877 } 878 *out++= val >> 16; 879 if (count > 1) 880 *out ++= val >> 24; 881 } 882 883 wpi_nic_unlock(sc); 884 885 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 886 887 return 0; 888 } 889 890 static void 891 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 892 { 893 if (error != 0) 894 return; 895 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 896 *(bus_addr_t *)arg = segs[0].ds_addr; 897 } 898 899 /* 900 * Allocates a contiguous block of dma memory of the requested size and 901 * alignment. 
902 */ 903 static int 904 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 905 void **kvap, bus_size_t size, bus_size_t alignment) 906 { 907 int error; 908 909 dma->tag = NULL; 910 dma->size = size; 911 912 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 913 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 914 1, size, 0, NULL, NULL, &dma->tag); 915 if (error != 0) 916 goto fail; 917 918 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 919 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 920 if (error != 0) 921 goto fail; 922 923 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 924 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 925 if (error != 0) 926 goto fail; 927 928 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 929 930 if (kvap != NULL) 931 *kvap = dma->vaddr; 932 933 return 0; 934 935 fail: wpi_dma_contig_free(dma); 936 return error; 937 } 938 939 static void 940 wpi_dma_contig_free(struct wpi_dma_info *dma) 941 { 942 if (dma->vaddr != NULL) { 943 bus_dmamap_sync(dma->tag, dma->map, 944 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 945 bus_dmamap_unload(dma->tag, dma->map); 946 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 947 dma->vaddr = NULL; 948 } 949 if (dma->tag != NULL) { 950 bus_dma_tag_destroy(dma->tag); 951 dma->tag = NULL; 952 } 953 } 954 955 /* 956 * Allocate a shared page between host and NIC. 957 */ 958 static int 959 wpi_alloc_shared(struct wpi_softc *sc) 960 { 961 /* Shared buffer must be aligned on a 4KB boundary. */ 962 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 963 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 964 } 965 966 static void 967 wpi_free_shared(struct wpi_softc *sc) 968 { 969 wpi_dma_contig_free(&sc->shared_dma); 970 } 971 972 /* 973 * Allocate DMA-safe memory for firmware transfer. 974 */ 975 static int 976 wpi_alloc_fwmem(struct wpi_softc *sc) 977 { 978 /* Must be aligned on a 16-byte boundary. */ 979 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 980 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 981 } 982 983 static void 984 wpi_free_fwmem(struct wpi_softc *sc) 985 { 986 wpi_dma_contig_free(&sc->fw_dma); 987 } 988 989 static int 990 wpi_alloc_rx_ring(struct wpi_softc *sc) 991 { 992 struct wpi_rx_ring *ring = &sc->rxq; 993 bus_size_t size; 994 int i, error; 995 996 ring->cur = 0; 997 ring->update = 0; 998 999 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1000 1001 /* Allocate RX descriptors (16KB aligned.) */ 1002 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1003 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1004 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1005 if (error != 0) { 1006 device_printf(sc->sc_dev, 1007 "%s: could not allocate RX ring DMA memory, error %d\n", 1008 __func__, error); 1009 goto fail; 1010 } 1011 1012 /* Create RX buffer DMA tag. */ 1013 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1014 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1015 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat); 1016 if (error != 0) { 1017 device_printf(sc->sc_dev, 1018 "%s: could not create RX buf DMA tag, error %d\n", 1019 __func__, error); 1020 goto fail; 1021 } 1022 1023 /* 1024 * Allocate and map RX buffers. 
1025 */ 1026 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1027 struct wpi_rx_data *data = &ring->data[i]; 1028 bus_addr_t paddr; 1029 1030 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1031 if (error != 0) { 1032 device_printf(sc->sc_dev, 1033 "%s: could not create RX buf DMA map, error %d\n", 1034 __func__, error); 1035 goto fail; 1036 } 1037 1038 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1039 if (data->m == NULL) { 1040 device_printf(sc->sc_dev, 1041 "%s: could not allocate RX mbuf\n", __func__); 1042 error = ENOBUFS; 1043 goto fail; 1044 } 1045 1046 error = bus_dmamap_load(ring->data_dmat, data->map, 1047 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1048 &paddr, BUS_DMA_NOWAIT); 1049 if (error != 0 && error != EFBIG) { 1050 device_printf(sc->sc_dev, 1051 "%s: can't map mbuf (error %d)\n", __func__, 1052 error); 1053 goto fail; 1054 } 1055 1056 /* Set physical address of RX buffer. */ 1057 ring->desc[i] = htole32(paddr); 1058 } 1059 1060 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1061 BUS_DMASYNC_PREWRITE); 1062 1063 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1064 1065 return 0; 1066 1067 fail: wpi_free_rx_ring(sc); 1068 1069 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1070 1071 return error; 1072 } 1073 1074 static void 1075 wpi_update_rx_ring(struct wpi_softc *sc) 1076 { 1077 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1078 } 1079 1080 static void 1081 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1082 { 1083 struct wpi_rx_ring *ring = &sc->rxq; 1084 1085 if (ring->update != 0) { 1086 /* Wait for INT_WAKEUP event. */ 1087 return; 1088 } 1089 1090 WPI_TXQ_LOCK(sc); 1091 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1092 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1093 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1094 __func__); 1095 ring->update = 1; 1096 } else { 1097 wpi_update_rx_ring(sc); 1098 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1099 } 1100 WPI_TXQ_UNLOCK(sc); 1101 } 1102 1103 static void 1104 wpi_reset_rx_ring(struct wpi_softc *sc) 1105 { 1106 struct wpi_rx_ring *ring = &sc->rxq; 1107 int ntries; 1108 1109 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1110 1111 if (wpi_nic_lock(sc) == 0) { 1112 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1113 for (ntries = 0; ntries < 1000; ntries++) { 1114 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1115 WPI_FH_RX_STATUS_IDLE) 1116 break; 1117 DELAY(10); 1118 } 1119 wpi_nic_unlock(sc); 1120 } 1121 1122 ring->cur = 0; 1123 ring->update = 0; 1124 } 1125 1126 static void 1127 wpi_free_rx_ring(struct wpi_softc *sc) 1128 { 1129 struct wpi_rx_ring *ring = &sc->rxq; 1130 int i; 1131 1132 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1133 1134 wpi_dma_contig_free(&ring->desc_dma); 1135 1136 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1137 struct wpi_rx_data *data = &ring->data[i]; 1138 1139 if (data->m != NULL) { 1140 bus_dmamap_sync(ring->data_dmat, data->map, 1141 BUS_DMASYNC_POSTREAD); 1142 bus_dmamap_unload(ring->data_dmat, data->map); 1143 m_freem(data->m); 1144 data->m = NULL; 1145 } 1146 if (data->map != NULL) 1147 bus_dmamap_destroy(ring->data_dmat, data->map); 1148 } 1149 if (ring->data_dmat != NULL) { 1150 bus_dma_tag_destroy(ring->data_dmat); 1151 ring->data_dmat = NULL; 1152 } 1153 } 1154 1155 static int 1156 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid) 1157 { 1158 bus_addr_t paddr; 1159 bus_size_t size; 1160 int i, error; 1161 1162 ring->qid = qid; 1163 
ring->queued = 0; 1164 ring->cur = 0; 1165 ring->pending = 0; 1166 ring->update = 0; 1167 1168 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1169 1170 /* Allocate TX descriptors (16KB aligned.) */ 1171 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1172 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1173 size, WPI_RING_DMA_ALIGN); 1174 if (error != 0) { 1175 device_printf(sc->sc_dev, 1176 "%s: could not allocate TX ring DMA memory, error %d\n", 1177 __func__, error); 1178 goto fail; 1179 } 1180 1181 /* Update shared area with ring physical address. */ 1182 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1183 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1184 BUS_DMASYNC_PREWRITE); 1185 1186 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1187 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1188 size, 4); 1189 if (error != 0) { 1190 device_printf(sc->sc_dev, 1191 "%s: could not allocate TX cmd DMA memory, error %d\n", 1192 __func__, error); 1193 goto fail; 1194 } 1195 1196 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1197 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1198 WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); 1199 if (error != 0) { 1200 device_printf(sc->sc_dev, 1201 "%s: could not create TX buf DMA tag, error %d\n", 1202 __func__, error); 1203 goto fail; 1204 } 1205 1206 paddr = ring->cmd_dma.paddr; 1207 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1208 struct wpi_tx_data *data = &ring->data[i]; 1209 1210 data->cmd_paddr = paddr; 1211 paddr += sizeof (struct wpi_tx_cmd); 1212 1213 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1214 if (error != 0) { 1215 device_printf(sc->sc_dev, 1216 "%s: could not create TX buf DMA map, error %d\n", 1217 __func__, error); 1218 goto fail; 1219 } 1220 } 1221 1222 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1223 1224 return 0; 1225 1226 fail: wpi_free_tx_ring(sc, ring); 1227 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1228 return error; 1229 } 1230 1231 static void 1232 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1233 { 1234 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1235 } 1236 1237 static void 1238 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1239 { 1240 1241 if (ring->update != 0) { 1242 /* Wait for INT_WAKEUP event. */ 1243 return; 1244 } 1245 1246 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1247 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1248 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1249 __func__, ring->qid); 1250 ring->update = 1; 1251 } else { 1252 wpi_update_tx_ring(sc, ring); 1253 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1254 } 1255 } 1256 1257 static void 1258 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1259 { 1260 int i; 1261 1262 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1263 1264 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1265 struct wpi_tx_data *data = &ring->data[i]; 1266 1267 if (data->m != NULL) { 1268 bus_dmamap_sync(ring->data_dmat, data->map, 1269 BUS_DMASYNC_POSTWRITE); 1270 bus_dmamap_unload(ring->data_dmat, data->map); 1271 m_freem(data->m); 1272 data->m = NULL; 1273 } 1274 if (data->ni != NULL) { 1275 ieee80211_free_node(data->ni); 1276 data->ni = NULL; 1277 } 1278 } 1279 /* Clear TX descriptors. 
*/ 1280 memset(ring->desc, 0, ring->desc_dma.size); 1281 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1282 BUS_DMASYNC_PREWRITE); 1283 ring->queued = 0; 1284 ring->cur = 0; 1285 ring->pending = 0; 1286 ring->update = 0; 1287 } 1288 1289 static void 1290 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1291 { 1292 int i; 1293 1294 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1295 1296 wpi_dma_contig_free(&ring->desc_dma); 1297 wpi_dma_contig_free(&ring->cmd_dma); 1298 1299 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1300 struct wpi_tx_data *data = &ring->data[i]; 1301 1302 if (data->m != NULL) { 1303 bus_dmamap_sync(ring->data_dmat, data->map, 1304 BUS_DMASYNC_POSTWRITE); 1305 bus_dmamap_unload(ring->data_dmat, data->map); 1306 m_freem(data->m); 1307 } 1308 if (data->map != NULL) 1309 bus_dmamap_destroy(ring->data_dmat, data->map); 1310 } 1311 if (ring->data_dmat != NULL) { 1312 bus_dma_tag_destroy(ring->data_dmat); 1313 ring->data_dmat = NULL; 1314 } 1315 } 1316 1317 /* 1318 * Extract various information from EEPROM. 1319 */ 1320 static int 1321 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1322 { 1323 #define WPI_CHK(res) do { \ 1324 if ((error = res) != 0) \ 1325 goto fail; \ 1326 } while (0) 1327 uint8_t i; 1328 int error; 1329 1330 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1331 1332 /* Adapter has to be powered on for EEPROM access to work. */ 1333 if ((error = wpi_apm_init(sc)) != 0) { 1334 device_printf(sc->sc_dev, 1335 "%s: could not power ON adapter, error %d\n", __func__, 1336 error); 1337 return error; 1338 } 1339 1340 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1341 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1342 error = EIO; 1343 goto fail; 1344 } 1345 /* Clear HW ownership of EEPROM. */ 1346 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1347 1348 /* Read the hardware capabilities, revision and SKU type. */ 1349 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1350 sizeof(sc->cap))); 1351 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1352 sizeof(sc->rev))); 1353 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1354 sizeof(sc->type))); 1355 1356 sc->rev = le16toh(sc->rev); 1357 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1358 sc->rev, sc->type); 1359 1360 /* Read the regulatory domain (4 ASCII characters.) */ 1361 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1362 sizeof(sc->domain))); 1363 1364 /* Read MAC address. */ 1365 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1366 IEEE80211_ADDR_LEN)); 1367 1368 /* Read the list of authorized channels. */ 1369 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1370 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1371 1372 /* Read the list of TX power groups. */ 1373 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1374 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1375 1376 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1377 1378 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1379 __func__); 1380 1381 return error; 1382 #undef WPI_CHK 1383 } 1384 1385 /* 1386 * Translate EEPROM flags to net80211. 
1387 */ 1388 static uint32_t 1389 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1390 { 1391 uint32_t nflags; 1392 1393 nflags = 0; 1394 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1395 nflags |= IEEE80211_CHAN_PASSIVE; 1396 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1397 nflags |= IEEE80211_CHAN_NOADHOC; 1398 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1399 nflags |= IEEE80211_CHAN_DFS; 1400 /* XXX apparently IBSS may still be marked */ 1401 nflags |= IEEE80211_CHAN_NOADHOC; 1402 } 1403 1404 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1405 if (nflags & IEEE80211_CHAN_NOADHOC) 1406 nflags |= IEEE80211_CHAN_NOHOSTAP; 1407 1408 return nflags; 1409 } 1410 1411 static void 1412 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n, int maxchans, 1413 int *nchans, struct ieee80211_channel chans[]) 1414 { 1415 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1416 const struct wpi_chan_band *band = &wpi_bands[n]; 1417 uint32_t nflags; 1418 uint8_t bands[IEEE80211_MODE_BYTES]; 1419 uint8_t chan, i; 1420 int error; 1421 1422 memset(bands, 0, sizeof(bands)); 1423 1424 if (n == 0) { 1425 setbit(bands, IEEE80211_MODE_11B); 1426 setbit(bands, IEEE80211_MODE_11G); 1427 } else 1428 setbit(bands, IEEE80211_MODE_11A); 1429 1430 for (i = 0; i < band->nchan; i++) { 1431 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1432 DPRINTF(sc, WPI_DEBUG_EEPROM, 1433 "Channel Not Valid: %d, band %d\n", 1434 band->chan[i],n); 1435 continue; 1436 } 1437 1438 chan = band->chan[i]; 1439 nflags = wpi_eeprom_channel_flags(&channels[i]); 1440 error = ieee80211_add_channel(chans, maxchans, nchans, 1441 chan, 0, channels[i].maxpwr, nflags, bands); 1442 if (error != 0) 1443 break; 1444 1445 /* Save maximum allowed TX power for this channel. */ 1446 sc->maxpwr[chan] = channels[i].maxpwr; 1447 1448 DPRINTF(sc, WPI_DEBUG_EEPROM, 1449 "adding chan %d flags=0x%x maxpwr=%d, offset %d\n", 1450 chan, channels[i].flags, sc->maxpwr[chan], *nchans); 1451 } 1452 } 1453 1454 /** 1455 * Read the eeprom to find out what channels are valid for the given 1456 * band and update net80211 with what we find. 1457 */ 1458 static int 1459 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n) 1460 { 1461 struct ieee80211com *ic = &sc->sc_ic; 1462 const struct wpi_chan_band *band = &wpi_bands[n]; 1463 int error; 1464 1465 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1466 1467 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1468 band->nchan * sizeof (struct wpi_eeprom_chan)); 1469 if (error != 0) { 1470 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1471 return error; 1472 } 1473 1474 wpi_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, 1475 ic->ic_channels); 1476 1477 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1478 1479 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1480 1481 return 0; 1482 } 1483 1484 static struct wpi_eeprom_chan * 1485 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1486 { 1487 int i, j; 1488 1489 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1490 for (i = 0; i < wpi_bands[j].nchan; i++) 1491 if (wpi_bands[j].chan[i] == c->ic_ieee && 1492 ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1) 1493 return &sc->eeprom_channels[j][i]; 1494 1495 return NULL; 1496 } 1497 1498 static void 1499 wpi_getradiocaps(struct ieee80211com *ic, 1500 int maxchans, int *nchans, struct ieee80211_channel chans[]) 1501 { 1502 struct wpi_softc *sc = ic->ic_softc; 1503 int i; 1504 1505 /* Parse the list of authorized channels. 
*/ 1506 for (i = 0; i < WPI_CHAN_BANDS_COUNT && *nchans < maxchans; i++) 1507 wpi_read_eeprom_band(sc, i, maxchans, nchans, chans); 1508 } 1509 1510 /* 1511 * Enforce flags read from EEPROM. 1512 */ 1513 static int 1514 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1515 int nchan, struct ieee80211_channel chans[]) 1516 { 1517 struct wpi_softc *sc = ic->ic_softc; 1518 int i; 1519 1520 for (i = 0; i < nchan; i++) { 1521 struct ieee80211_channel *c = &chans[i]; 1522 struct wpi_eeprom_chan *channel; 1523 1524 channel = wpi_find_eeprom_channel(sc, c); 1525 if (channel == NULL) { 1526 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 1527 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1528 return EINVAL; 1529 } 1530 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1531 } 1532 1533 return 0; 1534 } 1535 1536 static int 1537 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n) 1538 { 1539 struct wpi_power_group *group = &sc->groups[n]; 1540 struct wpi_eeprom_group rgroup; 1541 int i, error; 1542 1543 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1544 1545 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1546 &rgroup, sizeof rgroup)) != 0) { 1547 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1548 return error; 1549 } 1550 1551 /* Save TX power group information. */ 1552 group->chan = rgroup.chan; 1553 group->maxpwr = rgroup.maxpwr; 1554 /* Retrieve temperature at which the samples were taken. */ 1555 group->temp = (int16_t)le16toh(rgroup.temp); 1556 1557 DPRINTF(sc, WPI_DEBUG_EEPROM, 1558 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1559 group->maxpwr, group->temp); 1560 1561 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1562 group->samples[i].index = rgroup.samples[i].index; 1563 group->samples[i].power = rgroup.samples[i].power; 1564 1565 DPRINTF(sc, WPI_DEBUG_EEPROM, 1566 "\tsample %d: index=%d power=%d\n", i, 1567 group->samples[i].index, group->samples[i].power); 1568 } 1569 1570 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1571 1572 return 0; 1573 } 1574 1575 static __inline uint8_t 1576 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1577 { 1578 uint8_t newid = WPI_ID_IBSS_MIN; 1579 1580 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1581 if ((sc->nodesmsk & (1 << newid)) == 0) { 1582 sc->nodesmsk |= 1 << newid; 1583 return newid; 1584 } 1585 } 1586 1587 return WPI_ID_UNDEFINED; 1588 } 1589 1590 static __inline uint8_t 1591 wpi_add_node_entry_sta(struct wpi_softc *sc) 1592 { 1593 sc->nodesmsk |= 1 << WPI_ID_BSS; 1594 1595 return WPI_ID_BSS; 1596 } 1597 1598 static __inline int 1599 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1600 { 1601 if (id == WPI_ID_UNDEFINED) 1602 return 0; 1603 1604 return (sc->nodesmsk >> id) & 1; 1605 } 1606 1607 static __inline void 1608 wpi_clear_node_table(struct wpi_softc *sc) 1609 { 1610 sc->nodesmsk = 0; 1611 } 1612 1613 static __inline void 1614 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1615 { 1616 sc->nodesmsk &= ~(1 << id); 1617 } 1618 1619 static struct ieee80211_node * 1620 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1621 { 1622 struct wpi_node *wn; 1623 1624 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1625 M_NOWAIT | M_ZERO); 1626 1627 if (wn == NULL) 1628 return NULL; 1629 1630 wn->id = WPI_ID_UNDEFINED; 1631 1632 return &wn->ni; 1633 } 1634 1635 static void 1636 wpi_node_free(struct ieee80211_node *ni) 1637 { 1638 struct wpi_softc *sc = ni->ni_ic->ic_softc; 1639 struct wpi_node *wn = 
WPI_NODE(ni); 1640 1641 if (wn->id != WPI_ID_UNDEFINED) { 1642 WPI_NT_LOCK(sc); 1643 if (wpi_check_node_entry(sc, wn->id)) { 1644 wpi_del_node_entry(sc, wn->id); 1645 wpi_del_node(sc, ni); 1646 } 1647 WPI_NT_UNLOCK(sc); 1648 } 1649 1650 sc->sc_node_free(ni); 1651 } 1652 1653 static __inline int 1654 wpi_check_bss_filter(struct wpi_softc *sc) 1655 { 1656 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1657 } 1658 1659 static void 1660 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1661 const struct ieee80211_rx_stats *rxs, 1662 int rssi, int nf) 1663 { 1664 struct ieee80211vap *vap = ni->ni_vap; 1665 struct wpi_softc *sc = vap->iv_ic->ic_softc; 1666 struct wpi_vap *wvp = WPI_VAP(vap); 1667 uint64_t ni_tstamp, rx_tstamp; 1668 1669 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1670 1671 if (vap->iv_state == IEEE80211_S_RUN && 1672 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1673 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1674 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1675 rx_tstamp = le64toh(sc->rx_tstamp); 1676 1677 if (ni_tstamp >= rx_tstamp) { 1678 DPRINTF(sc, WPI_DEBUG_STATE, 1679 "ibss merge, tsf %ju tstamp %ju\n", 1680 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1681 (void) ieee80211_ibss_merge(ni); 1682 } 1683 } 1684 } 1685 1686 static void 1687 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1688 { 1689 struct wpi_softc *sc = arg; 1690 struct wpi_node *wn = WPI_NODE(ni); 1691 int error; 1692 1693 WPI_NT_LOCK(sc); 1694 if (wn->id != WPI_ID_UNDEFINED) { 1695 wn->id = WPI_ID_UNDEFINED; 1696 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1697 device_printf(sc->sc_dev, 1698 "%s: could not add IBSS node, error %d\n", 1699 __func__, error); 1700 } 1701 } 1702 WPI_NT_UNLOCK(sc); 1703 } 1704 1705 static void 1706 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1707 { 1708 struct ieee80211com *ic = &sc->sc_ic; 1709 1710 /* Set group keys once. 
*/ 1711 WPI_NT_LOCK(sc); 1712 wvp->wv_gtk = 0; 1713 WPI_NT_UNLOCK(sc); 1714 1715 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1716 ieee80211_crypto_reload_keys(ic); 1717 } 1718 1719 /** 1720 * Called by net80211 when ever there is a change to 80211 state machine 1721 */ 1722 static int 1723 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1724 { 1725 struct wpi_vap *wvp = WPI_VAP(vap); 1726 struct ieee80211com *ic = vap->iv_ic; 1727 struct wpi_softc *sc = ic->ic_softc; 1728 int error = 0; 1729 1730 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1731 1732 WPI_TXQ_LOCK(sc); 1733 if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) { 1734 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1735 WPI_TXQ_UNLOCK(sc); 1736 1737 return ENXIO; 1738 } 1739 WPI_TXQ_UNLOCK(sc); 1740 1741 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1742 ieee80211_state_name[vap->iv_state], 1743 ieee80211_state_name[nstate]); 1744 1745 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1746 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1747 device_printf(sc->sc_dev, 1748 "%s: could not set power saving level\n", 1749 __func__); 1750 return error; 1751 } 1752 1753 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1754 } 1755 1756 switch (nstate) { 1757 case IEEE80211_S_SCAN: 1758 WPI_RXON_LOCK(sc); 1759 if (wpi_check_bss_filter(sc) != 0) { 1760 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1761 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1762 device_printf(sc->sc_dev, 1763 "%s: could not send RXON\n", __func__); 1764 } 1765 } 1766 WPI_RXON_UNLOCK(sc); 1767 break; 1768 1769 case IEEE80211_S_ASSOC: 1770 if (vap->iv_state != IEEE80211_S_RUN) 1771 break; 1772 /* FALLTHROUGH */ 1773 case IEEE80211_S_AUTH: 1774 /* 1775 * NB: do not optimize AUTH -> AUTH state transmission - 1776 * this will break powersave with non-QoS AP! 1777 */ 1778 1779 /* 1780 * The node must be registered in the firmware before auth. 1781 * Also the associd must be cleared on RUN -> ASSOC 1782 * transitions. 1783 */ 1784 if ((error = wpi_auth(sc, vap)) != 0) { 1785 device_printf(sc->sc_dev, 1786 "%s: could not move to AUTH state, error %d\n", 1787 __func__, error); 1788 } 1789 break; 1790 1791 case IEEE80211_S_RUN: 1792 /* 1793 * RUN -> RUN transition: 1794 * STA mode: Just restart the timers. 1795 * IBSS mode: Process IBSS merge. 1796 */ 1797 if (vap->iv_state == IEEE80211_S_RUN) { 1798 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1799 WPI_RXON_LOCK(sc); 1800 wpi_calib_timeout(sc); 1801 WPI_RXON_UNLOCK(sc); 1802 break; 1803 } else { 1804 /* 1805 * Drop the BSS_FILTER bit 1806 * (there is no another way to change bssid). 1807 */ 1808 WPI_RXON_LOCK(sc); 1809 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1810 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1811 device_printf(sc->sc_dev, 1812 "%s: could not send RXON\n", 1813 __func__); 1814 } 1815 WPI_RXON_UNLOCK(sc); 1816 1817 /* Restore all what was lost. */ 1818 wpi_restore_node_table(sc, wvp); 1819 1820 /* XXX set conditionally? */ 1821 wpi_updateedca(ic); 1822 } 1823 } 1824 1825 /* 1826 * !RUN -> RUN requires setting the association id 1827 * which is done with a firmware cmd. We also defer 1828 * starting the timers until that work is done. 
1829 */ 1830 if ((error = wpi_run(sc, vap)) != 0) { 1831 device_printf(sc->sc_dev, 1832 "%s: could not move to RUN state\n", __func__); 1833 } 1834 break; 1835 1836 default: 1837 break; 1838 } 1839 if (error != 0) { 1840 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1841 return error; 1842 } 1843 1844 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1845 1846 return wvp->wv_newstate(vap, nstate, arg); 1847 } 1848 1849 static void 1850 wpi_calib_timeout(void *arg) 1851 { 1852 struct wpi_softc *sc = arg; 1853 1854 if (wpi_check_bss_filter(sc) == 0) 1855 return; 1856 1857 wpi_power_calibration(sc); 1858 1859 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1860 } 1861 1862 static __inline uint8_t 1863 rate2plcp(const uint8_t rate) 1864 { 1865 switch (rate) { 1866 case 12: return 0xd; 1867 case 18: return 0xf; 1868 case 24: return 0x5; 1869 case 36: return 0x7; 1870 case 48: return 0x9; 1871 case 72: return 0xb; 1872 case 96: return 0x1; 1873 case 108: return 0x3; 1874 case 2: return 10; 1875 case 4: return 20; 1876 case 11: return 55; 1877 case 22: return 110; 1878 default: return 0; 1879 } 1880 } 1881 1882 static __inline uint8_t 1883 plcp2rate(const uint8_t plcp) 1884 { 1885 switch (plcp) { 1886 case 0xd: return 12; 1887 case 0xf: return 18; 1888 case 0x5: return 24; 1889 case 0x7: return 36; 1890 case 0x9: return 48; 1891 case 0xb: return 72; 1892 case 0x1: return 96; 1893 case 0x3: return 108; 1894 case 10: return 2; 1895 case 20: return 4; 1896 case 55: return 11; 1897 case 110: return 22; 1898 default: return 0; 1899 } 1900 } 1901 1902 /* Quickly determine if a given rate is CCK or OFDM. */ 1903 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1904 1905 static void 1906 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1907 struct wpi_rx_data *data) 1908 { 1909 struct ieee80211com *ic = &sc->sc_ic; 1910 struct wpi_rx_ring *ring = &sc->rxq; 1911 struct wpi_rx_stat *stat; 1912 struct wpi_rx_head *head; 1913 struct wpi_rx_tail *tail; 1914 struct ieee80211_frame *wh; 1915 struct ieee80211_node *ni; 1916 struct mbuf *m, *m1; 1917 bus_addr_t paddr; 1918 uint32_t flags; 1919 uint16_t len; 1920 int error; 1921 1922 stat = (struct wpi_rx_stat *)(desc + 1); 1923 1924 if (__predict_false(stat->len > WPI_STAT_MAXLEN)) { 1925 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1926 goto fail1; 1927 } 1928 1929 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1930 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1931 len = le16toh(head->len); 1932 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1933 flags = le32toh(tail->flags); 1934 1935 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1936 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1937 le32toh(desc->len), len, (int8_t)stat->rssi, 1938 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1939 1940 /* Discard frames with a bad FCS early. */ 1941 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1942 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1943 __func__, flags); 1944 goto fail1; 1945 } 1946 /* Discard frames that are too short. 
*/ 1947 if (len < sizeof (struct ieee80211_frame_ack)) { 1948 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1949 __func__, len); 1950 goto fail1; 1951 } 1952 1953 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1954 if (__predict_false(m1 == NULL)) { 1955 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1956 __func__); 1957 goto fail1; 1958 } 1959 bus_dmamap_unload(ring->data_dmat, data->map); 1960 1961 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1962 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1963 if (__predict_false(error != 0 && error != EFBIG)) { 1964 device_printf(sc->sc_dev, 1965 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1966 m_freem(m1); 1967 1968 /* Try to reload the old mbuf. */ 1969 error = bus_dmamap_load(ring->data_dmat, data->map, 1970 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1971 &paddr, BUS_DMA_NOWAIT); 1972 if (error != 0 && error != EFBIG) { 1973 panic("%s: could not load old RX mbuf", __func__); 1974 } 1975 /* Physical address may have changed. */ 1976 ring->desc[ring->cur] = htole32(paddr); 1977 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1978 BUS_DMASYNC_PREWRITE); 1979 goto fail1; 1980 } 1981 1982 m = data->m; 1983 data->m = m1; 1984 /* Update RX descriptor. */ 1985 ring->desc[ring->cur] = htole32(paddr); 1986 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1987 BUS_DMASYNC_PREWRITE); 1988 1989 /* Finalize mbuf. */ 1990 m->m_data = (caddr_t)(head + 1); 1991 m->m_pkthdr.len = m->m_len = len; 1992 1993 /* Grab a reference to the source node. */ 1994 wh = mtod(m, struct ieee80211_frame *); 1995 1996 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 1997 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 1998 /* Check whether decryption was successful or not. */ 1999 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2000 DPRINTF(sc, WPI_DEBUG_RECV, 2001 "CCMP decryption failed 0x%x\n", flags); 2002 goto fail2; 2003 } 2004 m->m_flags |= M_WEP; 2005 } 2006 2007 if (len >= sizeof(struct ieee80211_frame_min)) 2008 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2009 else 2010 ni = NULL; 2011 2012 sc->rx_tstamp = tail->tstamp; 2013 2014 if (ieee80211_radiotap_active(ic)) { 2015 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2016 2017 tap->wr_flags = 0; 2018 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2019 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2020 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2021 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2022 tap->wr_tsft = tail->tstamp; 2023 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2024 tap->wr_rate = plcp2rate(head->plcp); 2025 } 2026 2027 WPI_UNLOCK(sc); 2028 2029 /* Send the frame to the 802.11 layer. */ 2030 if (ni != NULL) { 2031 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2032 /* Node is no longer needed. 
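 * (release the reference obtained from ieee80211_find_rxnode() above).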
*/ 2033 ieee80211_free_node(ni); 2034 } else 2035 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2036 2037 WPI_LOCK(sc); 2038 2039 return; 2040 2041 fail2: m_freem(m); 2042 2043 fail1: counter_u64_add(ic->ic_ierrors, 1); 2044 } 2045 2046 static void 2047 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2048 struct wpi_rx_data *data) 2049 { 2050 /* Ignore */ 2051 } 2052 2053 static void 2054 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2055 { 2056 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; 2057 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2058 struct wpi_tx_data *data = &ring->data[desc->idx]; 2059 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2060 struct mbuf *m; 2061 struct ieee80211_node *ni; 2062 uint32_t status = le32toh(stat->status); 2063 2064 KASSERT(data->ni != NULL, ("no node")); 2065 KASSERT(data->m != NULL, ("no mbuf")); 2066 2067 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2068 2069 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2070 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2071 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2072 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2073 2074 /* Unmap and free mbuf. */ 2075 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2076 bus_dmamap_unload(ring->data_dmat, data->map); 2077 m = data->m, data->m = NULL; 2078 ni = data->ni, data->ni = NULL; 2079 2080 /* Restore frame header. */ 2081 KASSERT(M_LEADINGSPACE(m) >= data->hdrlen, ("no frame header!")); 2082 M_PREPEND(m, data->hdrlen, M_NOWAIT); 2083 KASSERT(m != NULL, ("%s: m is NULL\n", __func__)); 2084 2085 /* 2086 * Update rate control statistics for the node. 2087 */ 2088 txs->pktlen = m->m_pkthdr.len; 2089 txs->short_retries = stat->rtsfailcnt; 2090 txs->long_retries = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2091 if (!(status & WPI_TX_STATUS_FAIL)) 2092 txs->status = IEEE80211_RATECTL_TX_SUCCESS; 2093 else { 2094 switch (status & 0xff) { 2095 case WPI_TX_STATUS_FAIL_SHORT_LIMIT: 2096 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT; 2097 break; 2098 case WPI_TX_STATUS_FAIL_LONG_LIMIT: 2099 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; 2100 break; 2101 case WPI_TX_STATUS_FAIL_LIFE_EXPIRE: 2102 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED; 2103 break; 2104 default: 2105 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; 2106 break; 2107 } 2108 } 2109 2110 ieee80211_ratectl_tx_complete(ni, txs); 2111 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2112 2113 WPI_TXQ_STATE_LOCK(sc); 2114 if (--ring->queued > 0) 2115 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2116 else 2117 callout_stop(&sc->tx_timeout); 2118 WPI_TXQ_STATE_UNLOCK(sc); 2119 2120 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2121 } 2122 2123 /* 2124 * Process a "command done" firmware notification. This is where we wakeup 2125 * processes waiting for a synchronous command completion. 2126 */ 2127 static void 2128 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2129 { 2130 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2131 struct wpi_tx_data *data; 2132 struct wpi_tx_cmd *cmd; 2133 2134 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2135 "type %s len %d\n", desc->qid, desc->idx, 2136 desc->flags, wpi_cmd_str(desc->type), 2137 le32toh(desc->len)); 2138 2139 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2140 return; /* Not a command ack. 
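 * (only entries on the command queue are command completions).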
*/ 2141 2142 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2143 2144 data = &ring->data[desc->idx]; 2145 cmd = &ring->cmd[desc->idx]; 2146 2147 /* If the command was mapped in an mbuf, free it. */ 2148 if (data->m != NULL) { 2149 bus_dmamap_sync(ring->data_dmat, data->map, 2150 BUS_DMASYNC_POSTWRITE); 2151 bus_dmamap_unload(ring->data_dmat, data->map); 2152 m_freem(data->m); 2153 data->m = NULL; 2154 } 2155 2156 wakeup(cmd); 2157 2158 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2159 struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data; 2160 2161 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2162 BUS_DMASYNC_POSTREAD); 2163 2164 WPI_TXQ_LOCK(sc); 2165 if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) { 2166 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2167 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2168 } else { 2169 sc->sc_update_rx_ring = wpi_update_rx_ring; 2170 sc->sc_update_tx_ring = wpi_update_tx_ring; 2171 } 2172 WPI_TXQ_UNLOCK(sc); 2173 } 2174 } 2175 2176 static void 2177 wpi_notif_intr(struct wpi_softc *sc) 2178 { 2179 struct ieee80211com *ic = &sc->sc_ic; 2180 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2181 uint32_t hw; 2182 2183 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2184 BUS_DMASYNC_POSTREAD); 2185 2186 hw = le32toh(sc->shared->next) & 0xfff; 2187 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2188 2189 while (sc->rxq.cur != hw) { 2190 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2191 2192 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2193 struct wpi_rx_desc *desc; 2194 2195 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2196 BUS_DMASYNC_POSTREAD); 2197 desc = mtod(data->m, struct wpi_rx_desc *); 2198 2199 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2200 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2201 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2202 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2203 2204 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2205 /* Reply to a command. */ 2206 wpi_cmd_done(sc, desc); 2207 } 2208 2209 switch (desc->type) { 2210 case WPI_RX_DONE: 2211 /* An 802.11 frame has been received. */ 2212 wpi_rx_done(sc, desc, data); 2213 2214 if (__predict_false(sc->sc_running == 0)) { 2215 /* wpi_stop() was called. */ 2216 return; 2217 } 2218 2219 break; 2220 2221 case WPI_TX_DONE: 2222 /* An 802.11 frame has been transmitted. 
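 * Update rate control statistics and reclaim the mbuf and node reference.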
*/ 2223 wpi_tx_done(sc, desc); 2224 break; 2225 2226 case WPI_RX_STATISTICS: 2227 case WPI_BEACON_STATISTICS: 2228 wpi_rx_statistics(sc, desc, data); 2229 break; 2230 2231 case WPI_BEACON_MISSED: 2232 { 2233 struct wpi_beacon_missed *miss = 2234 (struct wpi_beacon_missed *)(desc + 1); 2235 uint32_t expected, misses, received, threshold; 2236 2237 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2238 BUS_DMASYNC_POSTREAD); 2239 2240 misses = le32toh(miss->consecutive); 2241 expected = le32toh(miss->expected); 2242 received = le32toh(miss->received); 2243 threshold = MAX(2, vap->iv_bmissthreshold); 2244 2245 DPRINTF(sc, WPI_DEBUG_BMISS, 2246 "%s: beacons missed %u(%u) (received %u/%u)\n", 2247 __func__, misses, le32toh(miss->total), received, 2248 expected); 2249 2250 if (misses >= threshold || 2251 (received == 0 && expected >= threshold)) { 2252 WPI_RXON_LOCK(sc); 2253 if (callout_pending(&sc->scan_timeout)) { 2254 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2255 0, 1); 2256 } 2257 WPI_RXON_UNLOCK(sc); 2258 if (vap->iv_state == IEEE80211_S_RUN && 2259 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2260 ieee80211_beacon_miss(ic); 2261 } 2262 2263 break; 2264 } 2265 #ifdef WPI_DEBUG 2266 case WPI_BEACON_SENT: 2267 { 2268 struct wpi_tx_stat *stat = 2269 (struct wpi_tx_stat *)(desc + 1); 2270 uint64_t *tsf = (uint64_t *)(stat + 1); 2271 uint32_t *mode = (uint32_t *)(tsf + 1); 2272 2273 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2274 BUS_DMASYNC_POSTREAD); 2275 2276 DPRINTF(sc, WPI_DEBUG_BEACON, 2277 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2278 "duration %u, status %x, tsf %ju, mode %x\n", 2279 stat->rtsfailcnt, stat->ackfailcnt, 2280 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2281 le32toh(stat->status), le64toh(*tsf), 2282 le32toh(*mode)); 2283 2284 break; 2285 } 2286 #endif 2287 case WPI_UC_READY: 2288 { 2289 struct wpi_ucode_info *uc = 2290 (struct wpi_ucode_info *)(desc + 1); 2291 2292 /* The microcontroller is ready. */ 2293 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2294 BUS_DMASYNC_POSTREAD); 2295 DPRINTF(sc, WPI_DEBUG_RESET, 2296 "microcode alive notification version=%d.%d " 2297 "subtype=%x alive=%x\n", uc->major, uc->minor, 2298 uc->subtype, le32toh(uc->valid)); 2299 2300 if (le32toh(uc->valid) != 1) { 2301 device_printf(sc->sc_dev, 2302 "microcontroller initialization failed\n"); 2303 wpi_stop_locked(sc); 2304 return; 2305 } 2306 /* Save the address of the error log in SRAM. 
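 * (wpi_fatal_intr() uses it to dump the log after a firmware panic).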
*/ 2307 sc->errptr = le32toh(uc->errptr); 2308 break; 2309 } 2310 case WPI_STATE_CHANGED: 2311 { 2312 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2313 BUS_DMASYNC_POSTREAD); 2314 2315 uint32_t *status = (uint32_t *)(desc + 1); 2316 2317 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2318 le32toh(*status)); 2319 2320 if (le32toh(*status) & 1) { 2321 WPI_NT_LOCK(sc); 2322 wpi_clear_node_table(sc); 2323 WPI_NT_UNLOCK(sc); 2324 ieee80211_runtask(ic, 2325 &sc->sc_radiooff_task); 2326 return; 2327 } 2328 break; 2329 } 2330 #ifdef WPI_DEBUG 2331 case WPI_START_SCAN: 2332 { 2333 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2334 BUS_DMASYNC_POSTREAD); 2335 2336 struct wpi_start_scan *scan = 2337 (struct wpi_start_scan *)(desc + 1); 2338 DPRINTF(sc, WPI_DEBUG_SCAN, 2339 "%s: scanning channel %d status %x\n", 2340 __func__, scan->chan, le32toh(scan->status)); 2341 2342 break; 2343 } 2344 #endif 2345 case WPI_STOP_SCAN: 2346 { 2347 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2348 BUS_DMASYNC_POSTREAD); 2349 2350 struct wpi_stop_scan *scan = 2351 (struct wpi_stop_scan *)(desc + 1); 2352 2353 DPRINTF(sc, WPI_DEBUG_SCAN, 2354 "scan finished nchan=%d status=%d chan=%d\n", 2355 scan->nchan, scan->status, scan->chan); 2356 2357 WPI_RXON_LOCK(sc); 2358 callout_stop(&sc->scan_timeout); 2359 WPI_RXON_UNLOCK(sc); 2360 if (scan->status == WPI_SCAN_ABORTED) 2361 ieee80211_cancel_scan(vap); 2362 else 2363 ieee80211_scan_next(vap); 2364 break; 2365 } 2366 } 2367 2368 if (sc->rxq.cur % 8 == 0) { 2369 /* Tell the firmware what we have processed. */ 2370 sc->sc_update_rx_ring(sc); 2371 } 2372 } 2373 } 2374 2375 /* 2376 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2377 * from power-down sleep mode. 2378 */ 2379 static void 2380 wpi_wakeup_intr(struct wpi_softc *sc) 2381 { 2382 int qid; 2383 2384 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2385 "%s: ucode wakeup from power-down sleep\n", __func__); 2386 2387 /* Wakeup RX and TX rings. 
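 * Ring index updates that could not be posted while the device was asleep are replayed here.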
*/ 2388 if (sc->rxq.update) { 2389 sc->rxq.update = 0; 2390 wpi_update_rx_ring(sc); 2391 } 2392 WPI_TXQ_LOCK(sc); 2393 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2394 struct wpi_tx_ring *ring = &sc->txq[qid]; 2395 2396 if (ring->update) { 2397 ring->update = 0; 2398 wpi_update_tx_ring(sc, ring); 2399 } 2400 } 2401 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2402 WPI_TXQ_UNLOCK(sc); 2403 } 2404 2405 /* 2406 * This function prints firmware registers 2407 */ 2408 #ifdef WPI_DEBUG 2409 static void 2410 wpi_debug_registers(struct wpi_softc *sc) 2411 { 2412 size_t i; 2413 static const uint32_t csr_tbl[] = { 2414 WPI_HW_IF_CONFIG, 2415 WPI_INT, 2416 WPI_INT_MASK, 2417 WPI_FH_INT, 2418 WPI_GPIO_IN, 2419 WPI_RESET, 2420 WPI_GP_CNTRL, 2421 WPI_EEPROM, 2422 WPI_EEPROM_GP, 2423 WPI_GIO, 2424 WPI_UCODE_GP1, 2425 WPI_UCODE_GP2, 2426 WPI_GIO_CHICKEN, 2427 WPI_ANA_PLL, 2428 WPI_DBG_HPET_MEM, 2429 }; 2430 static const uint32_t prph_tbl[] = { 2431 WPI_APMG_CLK_CTRL, 2432 WPI_APMG_PS, 2433 WPI_APMG_PCI_STT, 2434 WPI_APMG_RFKILL, 2435 }; 2436 2437 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2438 2439 for (i = 0; i < nitems(csr_tbl); i++) { 2440 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2441 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2442 2443 if ((i + 1) % 2 == 0) 2444 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2445 } 2446 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2447 2448 if (wpi_nic_lock(sc) == 0) { 2449 for (i = 0; i < nitems(prph_tbl); i++) { 2450 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2451 wpi_get_prph_string(prph_tbl[i]), 2452 wpi_prph_read(sc, prph_tbl[i])); 2453 2454 if ((i + 1) % 2 == 0) 2455 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2456 } 2457 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2458 wpi_nic_unlock(sc); 2459 } else { 2460 DPRINTF(sc, WPI_DEBUG_REGISTER, 2461 "Cannot access internal registers.\n"); 2462 } 2463 } 2464 #endif 2465 2466 /* 2467 * Dump the error log of the firmware when a firmware panic occurs. Although 2468 * we can't debug the firmware because it is neither open source nor free, it 2469 * can help us to identify certain classes of problems. 2470 */ 2471 static void 2472 wpi_fatal_intr(struct wpi_softc *sc) 2473 { 2474 struct wpi_fw_dump dump; 2475 uint32_t i, offset, count; 2476 2477 /* Check that the error log address is valid. */ 2478 if (sc->errptr < WPI_FW_DATA_BASE || 2479 sc->errptr + sizeof (dump) > 2480 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2481 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2482 sc->errptr); 2483 return; 2484 } 2485 if (wpi_nic_lock(sc) != 0) { 2486 printf("%s: could not read firmware error log\n", __func__); 2487 return; 2488 } 2489 /* Read number of entries in the log. */ 2490 count = wpi_mem_read(sc, sc->errptr); 2491 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2492 printf("%s: invalid count field (count = %u)\n", __func__, 2493 count); 2494 wpi_nic_unlock(sc); 2495 return; 2496 } 2497 /* Skip "count" field. */ 2498 offset = sc->errptr + sizeof (uint32_t); 2499 printf("firmware error log (count = %u):\n", count); 2500 for (i = 0; i < count; i++) { 2501 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2502 sizeof (dump) / sizeof (uint32_t)); 2503 2504 printf(" error type = \"%s\" (0x%08X)\n", 2505 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2506 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2507 dump.desc); 2508 printf(" error data = 0x%08X\n", 2509 dump.data); 2510 printf(" branch link = 0x%08X%08X\n", 2511 dump.blink[0], dump.blink[1]); 2512 printf(" interrupt link = 0x%08X%08X\n", 2513 dump.ilink[0], dump.ilink[1]); 2514 printf(" time = %u\n", dump.time); 2515 2516 offset += sizeof (dump); 2517 } 2518 wpi_nic_unlock(sc); 2519 /* Dump driver status (TX and RX rings) while we're here. */ 2520 printf("driver status:\n"); 2521 WPI_TXQ_LOCK(sc); 2522 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2523 struct wpi_tx_ring *ring = &sc->txq[i]; 2524 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2525 i, ring->qid, ring->cur, ring->queued); 2526 } 2527 WPI_TXQ_UNLOCK(sc); 2528 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2529 } 2530 2531 static void 2532 wpi_intr(void *arg) 2533 { 2534 struct wpi_softc *sc = arg; 2535 uint32_t r1, r2; 2536 2537 WPI_LOCK(sc); 2538 2539 /* Disable interrupts. */ 2540 WPI_WRITE(sc, WPI_INT_MASK, 0); 2541 2542 r1 = WPI_READ(sc, WPI_INT); 2543 2544 if (__predict_false(r1 == 0xffffffff || 2545 (r1 & 0xfffffff0) == 0xa5a5a5a0)) 2546 goto end; /* Hardware gone! */ 2547 2548 r2 = WPI_READ(sc, WPI_FH_INT); 2549 2550 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2551 r1, r2); 2552 2553 if (r1 == 0 && r2 == 0) 2554 goto done; /* Interrupt not for us. */ 2555 2556 /* Acknowledge interrupts. */ 2557 WPI_WRITE(sc, WPI_INT, r1); 2558 WPI_WRITE(sc, WPI_FH_INT, r2); 2559 2560 if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) { 2561 struct ieee80211com *ic = &sc->sc_ic; 2562 2563 device_printf(sc->sc_dev, "fatal firmware error\n"); 2564 #ifdef WPI_DEBUG 2565 wpi_debug_registers(sc); 2566 #endif 2567 wpi_fatal_intr(sc); 2568 DPRINTF(sc, WPI_DEBUG_HW, 2569 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2570 "(Hardware Error)"); 2571 ieee80211_restart_all(ic); 2572 goto end; 2573 } 2574 2575 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2576 (r2 & WPI_FH_INT_RX)) 2577 wpi_notif_intr(sc); 2578 2579 if (r1 & WPI_INT_ALIVE) 2580 wakeup(sc); /* Firmware is alive. */ 2581 2582 if (r1 & WPI_INT_WAKEUP) 2583 wpi_wakeup_intr(sc); 2584 2585 done: 2586 /* Re-enable interrupts. 
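 * (skipped when the interface is no longer running, so they stay masked).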
*/ 2587 if (__predict_true(sc->sc_running)) 2588 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2589 2590 end: WPI_UNLOCK(sc); 2591 } 2592 2593 static void 2594 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac) 2595 { 2596 struct wpi_tx_ring *ring; 2597 struct wpi_tx_data *data; 2598 uint8_t cur; 2599 2600 WPI_TXQ_LOCK(sc); 2601 ring = &sc->txq[ac]; 2602 2603 while (ring->pending != 0) { 2604 ring->pending--; 2605 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2606 data = &ring->data[cur]; 2607 2608 bus_dmamap_sync(ring->data_dmat, data->map, 2609 BUS_DMASYNC_POSTWRITE); 2610 bus_dmamap_unload(ring->data_dmat, data->map); 2611 m_freem(data->m); 2612 data->m = NULL; 2613 2614 ieee80211_node_decref(data->ni); 2615 data->ni = NULL; 2616 } 2617 2618 WPI_TXQ_UNLOCK(sc); 2619 } 2620 2621 static int 2622 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2623 { 2624 struct ieee80211_frame *wh; 2625 struct wpi_tx_cmd *cmd; 2626 struct wpi_tx_data *data; 2627 struct wpi_tx_desc *desc; 2628 struct wpi_tx_ring *ring; 2629 struct mbuf *m1; 2630 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2631 uint8_t cur, pad; 2632 uint16_t hdrlen; 2633 int error, i, nsegs, totlen, frag; 2634 2635 WPI_TXQ_LOCK(sc); 2636 2637 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2638 2639 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2640 2641 if (__predict_false(sc->sc_running == 0)) { 2642 /* wpi_stop() was called */ 2643 error = ENETDOWN; 2644 goto end; 2645 } 2646 2647 wh = mtod(buf->m, struct ieee80211_frame *); 2648 hdrlen = ieee80211_anyhdrsize(wh); 2649 totlen = buf->m->m_pkthdr.len; 2650 frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG); 2651 2652 if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) { 2653 error = EINVAL; 2654 goto end; 2655 } 2656 2657 if (hdrlen & 3) { 2658 /* First segment length must be a multiple of 4. */ 2659 pad = 4 - (hdrlen & 3); 2660 } else 2661 pad = 0; 2662 2663 ring = &sc->txq[buf->ac]; 2664 cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2665 desc = &ring->desc[cur]; 2666 data = &ring->data[cur]; 2667 2668 /* Prepare TX firmware command. */ 2669 cmd = &ring->cmd[cur]; 2670 cmd->code = buf->code; 2671 cmd->flags = 0; 2672 cmd->qid = ring->qid; 2673 cmd->idx = cur; 2674 2675 memcpy(cmd->data, buf->data, buf->size); 2676 2677 /* Save and trim IEEE802.11 header. */ 2678 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2679 m_adj(buf->m, hdrlen); 2680 2681 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2682 segs, &nsegs, BUS_DMA_NOWAIT); 2683 if (error != 0 && error != EFBIG) { 2684 device_printf(sc->sc_dev, 2685 "%s: can't map mbuf (error %d)\n", __func__, error); 2686 goto end; 2687 } 2688 if (error != 0) { 2689 /* Too many DMA segments, linearize mbuf. */ 2690 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2691 if (m1 == NULL) { 2692 device_printf(sc->sc_dev, 2693 "%s: could not defrag mbuf\n", __func__); 2694 error = ENOBUFS; 2695 goto end; 2696 } 2697 buf->m = m1; 2698 2699 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2700 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2701 if (__predict_false(error != 0)) { 2702 /* XXX fix this (applicable to the iwn(4) too) */ 2703 /* 2704 * NB: Do not return error; 2705 * original mbuf does not exist anymore. 
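 * The frame is dropped and counted as an output error instead.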
2706 */ 2707 device_printf(sc->sc_dev, 2708 "%s: can't map mbuf (error %d)\n", __func__, 2709 error); 2710 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2711 if_inc_counter(buf->ni->ni_vap->iv_ifp, 2712 IFCOUNTER_OERRORS, 1); 2713 if (!frag) 2714 ieee80211_free_node(buf->ni); 2715 } 2716 m_freem(buf->m); 2717 error = 0; 2718 goto end; 2719 } 2720 } 2721 2722 KASSERT(nsegs < WPI_MAX_SCATTER, 2723 ("too many DMA segments, nsegs (%d) should be less than %d", 2724 nsegs, WPI_MAX_SCATTER)); 2725 2726 data->m = buf->m; 2727 data->ni = buf->ni; 2728 data->hdrlen = hdrlen; 2729 2730 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2731 __func__, ring->qid, cur, totlen, nsegs); 2732 2733 /* Fill TX descriptor. */ 2734 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2735 /* First DMA segment is used by the TX command. */ 2736 desc->segs[0].addr = htole32(data->cmd_paddr); 2737 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2738 /* Other DMA segments are for data payload. */ 2739 seg = &segs[0]; 2740 for (i = 1; i <= nsegs; i++) { 2741 desc->segs[i].addr = htole32(seg->ds_addr); 2742 desc->segs[i].len = htole32(seg->ds_len); 2743 seg++; 2744 } 2745 2746 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2747 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2748 BUS_DMASYNC_PREWRITE); 2749 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2750 BUS_DMASYNC_PREWRITE); 2751 2752 ring->pending += 1; 2753 2754 if (!frag) { 2755 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2756 WPI_TXQ_STATE_LOCK(sc); 2757 ring->queued += ring->pending; 2758 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, 2759 sc); 2760 WPI_TXQ_STATE_UNLOCK(sc); 2761 } 2762 2763 /* Kick TX ring. */ 2764 ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT; 2765 ring->pending = 0; 2766 sc->sc_update_tx_ring(sc, ring); 2767 } else 2768 (void) ieee80211_ref_node(data->ni); 2769 2770 end: DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 2771 __func__); 2772 2773 WPI_TXQ_UNLOCK(sc); 2774 2775 return (error); 2776 } 2777 2778 /* 2779 * Construct the data packet for a transmit buffer. 2780 */ 2781 static int 2782 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2783 { 2784 const struct ieee80211_txparam *tp = ni->ni_txparms; 2785 struct ieee80211vap *vap = ni->ni_vap; 2786 struct ieee80211com *ic = ni->ni_ic; 2787 struct wpi_node *wn = WPI_NODE(ni); 2788 struct ieee80211_frame *wh; 2789 struct ieee80211_key *k = NULL; 2790 struct wpi_buf tx_data; 2791 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2792 uint32_t flags; 2793 uint16_t ac, qos; 2794 uint8_t tid, type, rate; 2795 int swcrypt, ismcast, totlen; 2796 2797 wh = mtod(m, struct ieee80211_frame *); 2798 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2799 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2800 swcrypt = 1; 2801 2802 /* Select EDCA Access Category and TX ring for this frame. */ 2803 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2804 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2805 tid = qos & IEEE80211_QOS_TID; 2806 } else { 2807 qos = 0; 2808 tid = 0; 2809 } 2810 ac = M_WME_GETAC(m); 2811 2812 /* Choose a TX rate index. 
*/ 2813 if (type == IEEE80211_FC0_TYPE_MGT || 2814 type == IEEE80211_FC0_TYPE_CTL || 2815 (m->m_flags & M_EAPOL) != 0) 2816 rate = tp->mgmtrate; 2817 else if (ismcast) 2818 rate = tp->mcastrate; 2819 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2820 rate = tp->ucastrate; 2821 else { 2822 /* XXX pass pktlen */ 2823 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2824 rate = ni->ni_txrate; 2825 } 2826 2827 /* Encrypt the frame if need be. */ 2828 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2829 /* Retrieve key for TX. */ 2830 k = ieee80211_crypto_encap(ni, m); 2831 if (k == NULL) 2832 return (ENOBUFS); 2833 2834 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2835 2836 /* 802.11 header may have moved. */ 2837 wh = mtod(m, struct ieee80211_frame *); 2838 } 2839 totlen = m->m_pkthdr.len; 2840 2841 if (ieee80211_radiotap_active_vap(vap)) { 2842 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2843 2844 tap->wt_flags = 0; 2845 tap->wt_rate = rate; 2846 if (k != NULL) 2847 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2848 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2849 tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2850 2851 ieee80211_radiotap_tx(vap, m); 2852 } 2853 2854 flags = 0; 2855 if (!ismcast) { 2856 /* Unicast frame, check if an ACK is expected. */ 2857 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2858 IEEE80211_QOS_ACKPOLICY_NOACK) 2859 flags |= WPI_TX_NEED_ACK; 2860 } 2861 2862 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2863 flags |= WPI_TX_AUTO_SEQ; 2864 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2865 flags |= WPI_TX_MORE_FRAG; 2866 2867 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2868 if (!ismcast) { 2869 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2870 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2871 flags |= WPI_TX_NEED_RTS; 2872 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2873 WPI_RATE_IS_OFDM(rate)) { 2874 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2875 flags |= WPI_TX_NEED_CTS; 2876 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2877 flags |= WPI_TX_NEED_RTS; 2878 } 2879 2880 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2881 flags |= WPI_TX_FULL_TXOP; 2882 } 2883 2884 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2885 if (type == IEEE80211_FC0_TYPE_MGT) { 2886 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2887 2888 /* Tell HW to set timestamp in probe responses. 
*/ 2889 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2890 flags |= WPI_TX_INSERT_TSTAMP; 2891 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2892 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2893 tx->timeout = htole16(3); 2894 else 2895 tx->timeout = htole16(2); 2896 } 2897 2898 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2899 tx->id = WPI_ID_BROADCAST; 2900 else { 2901 if (wn->id == WPI_ID_UNDEFINED) { 2902 device_printf(sc->sc_dev, 2903 "%s: undefined node id\n", __func__); 2904 return (EINVAL); 2905 } 2906 2907 tx->id = wn->id; 2908 } 2909 2910 if (!swcrypt) { 2911 switch (k->wk_cipher->ic_cipher) { 2912 case IEEE80211_CIPHER_AES_CCM: 2913 tx->security = WPI_CIPHER_CCMP; 2914 break; 2915 2916 default: 2917 break; 2918 } 2919 2920 memcpy(tx->key, k->wk_key, k->wk_keylen); 2921 } 2922 2923 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 2924 struct mbuf *next = m->m_nextpkt; 2925 2926 tx->lnext = htole16(next->m_pkthdr.len); 2927 tx->fnext = htole32(tx->security | 2928 (flags & WPI_TX_NEED_ACK) | 2929 WPI_NEXT_STA_ID(tx->id)); 2930 } 2931 2932 tx->len = htole16(totlen); 2933 tx->flags = htole32(flags); 2934 tx->plcp = rate2plcp(rate); 2935 tx->tid = tid; 2936 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2937 tx->ofdm_mask = 0xff; 2938 tx->cck_mask = 0x0f; 2939 tx->rts_ntries = 7; 2940 tx->data_ntries = tp->maxretry; 2941 2942 tx_data.ni = ni; 2943 tx_data.m = m; 2944 tx_data.size = sizeof(struct wpi_cmd_data); 2945 tx_data.code = WPI_CMD_TX_DATA; 2946 tx_data.ac = ac; 2947 2948 return wpi_cmd2(sc, &tx_data); 2949 } 2950 2951 static int 2952 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2953 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2954 { 2955 struct ieee80211vap *vap = ni->ni_vap; 2956 struct ieee80211_key *k = NULL; 2957 struct ieee80211_frame *wh; 2958 struct wpi_buf tx_data; 2959 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2960 uint32_t flags; 2961 uint8_t ac, type, rate; 2962 int swcrypt, totlen; 2963 2964 wh = mtod(m, struct ieee80211_frame *); 2965 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2966 swcrypt = 1; 2967 2968 ac = params->ibp_pri & 3; 2969 2970 /* Choose a TX rate index. */ 2971 rate = params->ibp_rate0; 2972 2973 flags = 0; 2974 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2975 flags |= WPI_TX_AUTO_SEQ; 2976 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2977 flags |= WPI_TX_NEED_ACK; 2978 if (params->ibp_flags & IEEE80211_BPF_RTS) 2979 flags |= WPI_TX_NEED_RTS; 2980 if (params->ibp_flags & IEEE80211_BPF_CTS) 2981 flags |= WPI_TX_NEED_CTS; 2982 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2983 flags |= WPI_TX_FULL_TXOP; 2984 2985 /* Encrypt the frame if need be. */ 2986 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2987 /* Retrieve key for TX. */ 2988 k = ieee80211_crypto_encap(ni, m); 2989 if (k == NULL) 2990 return (ENOBUFS); 2991 2992 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2993 2994 /* 802.11 header may have moved. 
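 * Re-read the pointer after ieee80211_crypto_encap().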
*/ 2995 wh = mtod(m, struct ieee80211_frame *); 2996 } 2997 totlen = m->m_pkthdr.len; 2998 2999 if (ieee80211_radiotap_active_vap(vap)) { 3000 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 3001 3002 tap->wt_flags = 0; 3003 tap->wt_rate = rate; 3004 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 3005 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3006 3007 ieee80211_radiotap_tx(vap, m); 3008 } 3009 3010 memset(tx, 0, sizeof (struct wpi_cmd_data)); 3011 if (type == IEEE80211_FC0_TYPE_MGT) { 3012 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3013 3014 /* Tell HW to set timestamp in probe responses. */ 3015 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3016 flags |= WPI_TX_INSERT_TSTAMP; 3017 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3018 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3019 tx->timeout = htole16(3); 3020 else 3021 tx->timeout = htole16(2); 3022 } 3023 3024 if (!swcrypt) { 3025 switch (k->wk_cipher->ic_cipher) { 3026 case IEEE80211_CIPHER_AES_CCM: 3027 tx->security = WPI_CIPHER_CCMP; 3028 break; 3029 3030 default: 3031 break; 3032 } 3033 3034 memcpy(tx->key, k->wk_key, k->wk_keylen); 3035 } 3036 3037 tx->len = htole16(totlen); 3038 tx->flags = htole32(flags); 3039 tx->plcp = rate2plcp(rate); 3040 tx->id = WPI_ID_BROADCAST; 3041 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3042 tx->rts_ntries = params->ibp_try1; 3043 tx->data_ntries = params->ibp_try0; 3044 3045 tx_data.ni = ni; 3046 tx_data.m = m; 3047 tx_data.size = sizeof(struct wpi_cmd_data); 3048 tx_data.code = WPI_CMD_TX_DATA; 3049 tx_data.ac = ac; 3050 3051 return wpi_cmd2(sc, &tx_data); 3052 } 3053 3054 static __inline int 3055 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac) 3056 { 3057 struct wpi_tx_ring *ring = &sc->txq[ac]; 3058 int retval; 3059 3060 WPI_TXQ_STATE_LOCK(sc); 3061 retval = WPI_TX_RING_HIMARK - ring->queued; 3062 WPI_TXQ_STATE_UNLOCK(sc); 3063 3064 return retval; 3065 } 3066 3067 static int 3068 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3069 const struct ieee80211_bpf_params *params) 3070 { 3071 struct ieee80211com *ic = ni->ni_ic; 3072 struct wpi_softc *sc = ic->ic_softc; 3073 uint16_t ac; 3074 int error = 0; 3075 3076 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3077 3078 ac = M_WME_GETAC(m); 3079 3080 WPI_TX_LOCK(sc); 3081 3082 /* NB: no fragments here */ 3083 if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) { 3084 error = sc->sc_running ? ENOBUFS : ENETDOWN; 3085 goto unlock; 3086 } 3087 3088 if (params == NULL) { 3089 /* 3090 * Legacy path; interpret frame contents to decide 3091 * precisely how to send the frame. 3092 */ 3093 error = wpi_tx_data(sc, m, ni); 3094 } else { 3095 /* 3096 * Caller supplied explicit parameters to use in 3097 * sending the frame. 3098 */ 3099 error = wpi_tx_data_raw(sc, m, ni, params); 3100 } 3101 3102 unlock: WPI_TX_UNLOCK(sc); 3103 3104 if (error != 0) { 3105 m_freem(m); 3106 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3107 3108 return error; 3109 } 3110 3111 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3112 3113 return 0; 3114 } 3115 3116 static int 3117 wpi_transmit(struct ieee80211com *ic, struct mbuf *m) 3118 { 3119 struct wpi_softc *sc = ic->ic_softc; 3120 struct ieee80211_node *ni; 3121 struct mbuf *mnext; 3122 uint16_t ac; 3123 int error, nmbufs; 3124 3125 WPI_TX_LOCK(sc); 3126 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3127 3128 /* Check if interface is up & running. 
*/ 3129 if (__predict_false(sc->sc_running == 0)) { 3130 error = ENXIO; 3131 goto unlock; 3132 } 3133 3134 nmbufs = 1; 3135 for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt) 3136 nmbufs++; 3137 3138 /* Check for available space. */ 3139 ac = M_WME_GETAC(m); 3140 if (wpi_tx_ring_free_space(sc, ac) < nmbufs) { 3141 error = ENOBUFS; 3142 goto unlock; 3143 } 3144 3145 error = 0; 3146 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3147 do { 3148 mnext = m->m_nextpkt; 3149 if (wpi_tx_data(sc, m, ni) != 0) { 3150 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 3151 nmbufs); 3152 wpi_free_txfrags(sc, ac); 3153 ieee80211_free_mbuf(m); 3154 ieee80211_free_node(ni); 3155 break; 3156 } 3157 } while((m = mnext) != NULL); 3158 3159 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3160 3161 unlock: WPI_TX_UNLOCK(sc); 3162 3163 return (error); 3164 } 3165 3166 static void 3167 wpi_watchdog_rfkill(void *arg) 3168 { 3169 struct wpi_softc *sc = arg; 3170 struct ieee80211com *ic = &sc->sc_ic; 3171 3172 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3173 3174 /* No need to lock firmware memory. */ 3175 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3176 /* Radio kill switch is still off. */ 3177 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3178 sc); 3179 } else 3180 ieee80211_runtask(ic, &sc->sc_radioon_task); 3181 } 3182 3183 static void 3184 wpi_scan_timeout(void *arg) 3185 { 3186 struct wpi_softc *sc = arg; 3187 struct ieee80211com *ic = &sc->sc_ic; 3188 3189 ic_printf(ic, "scan timeout\n"); 3190 ieee80211_restart_all(ic); 3191 } 3192 3193 static void 3194 wpi_tx_timeout(void *arg) 3195 { 3196 struct wpi_softc *sc = arg; 3197 struct ieee80211com *ic = &sc->sc_ic; 3198 3199 ic_printf(ic, "device timeout\n"); 3200 ieee80211_restart_all(ic); 3201 } 3202 3203 static void 3204 wpi_parent(struct ieee80211com *ic) 3205 { 3206 struct wpi_softc *sc = ic->ic_softc; 3207 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3208 3209 if (ic->ic_nrunning > 0) { 3210 if (wpi_init(sc) == 0) { 3211 ieee80211_notify_radio(ic, 1); 3212 ieee80211_start_all(ic); 3213 } else { 3214 ieee80211_notify_radio(ic, 0); 3215 ieee80211_stop(vap); 3216 } 3217 } else { 3218 ieee80211_notify_radio(ic, 0); 3219 wpi_stop(sc); 3220 } 3221 } 3222 3223 /* 3224 * Send a command to the firmware. 3225 */ 3226 static int 3227 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size, 3228 int async) 3229 { 3230 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3231 struct wpi_tx_desc *desc; 3232 struct wpi_tx_data *data; 3233 struct wpi_tx_cmd *cmd; 3234 struct mbuf *m; 3235 bus_addr_t paddr; 3236 uint16_t totlen; 3237 int error; 3238 3239 WPI_TXQ_LOCK(sc); 3240 3241 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3242 3243 if (__predict_false(sc->sc_running == 0)) { 3244 /* wpi_stop() was called */ 3245 if (code == WPI_CMD_SCAN) 3246 error = ENETDOWN; 3247 else 3248 error = 0; 3249 3250 goto fail; 3251 } 3252 3253 if (async == 0) 3254 WPI_LOCK_ASSERT(sc); 3255 3256 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n", 3257 __func__, wpi_cmd_str(code), size, async); 3258 3259 desc = &ring->desc[ring->cur]; 3260 data = &ring->data[ring->cur]; 3261 totlen = 4 + size; 3262 3263 if (size > sizeof cmd->data) { 3264 /* Command is too large to fit in a descriptor. 
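 * Copy it into a separately allocated jumbo cluster and DMA-map that instead.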
*/ 3265 if (totlen > MCLBYTES) { 3266 error = EINVAL; 3267 goto fail; 3268 } 3269 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3270 if (m == NULL) { 3271 error = ENOMEM; 3272 goto fail; 3273 } 3274 cmd = mtod(m, struct wpi_tx_cmd *); 3275 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3276 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3277 if (error != 0) { 3278 m_freem(m); 3279 goto fail; 3280 } 3281 data->m = m; 3282 } else { 3283 cmd = &ring->cmd[ring->cur]; 3284 paddr = data->cmd_paddr; 3285 } 3286 3287 cmd->code = code; 3288 cmd->flags = 0; 3289 cmd->qid = ring->qid; 3290 cmd->idx = ring->cur; 3291 memcpy(cmd->data, buf, size); 3292 3293 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3294 desc->segs[0].addr = htole32(paddr); 3295 desc->segs[0].len = htole32(totlen); 3296 3297 if (size > sizeof cmd->data) { 3298 bus_dmamap_sync(ring->data_dmat, data->map, 3299 BUS_DMASYNC_PREWRITE); 3300 } else { 3301 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3302 BUS_DMASYNC_PREWRITE); 3303 } 3304 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3305 BUS_DMASYNC_PREWRITE); 3306 3307 /* Kick command ring. */ 3308 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3309 sc->sc_update_tx_ring(sc, ring); 3310 3311 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3312 3313 WPI_TXQ_UNLOCK(sc); 3314 3315 return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3316 3317 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3318 3319 WPI_TXQ_UNLOCK(sc); 3320 3321 return error; 3322 } 3323 3324 /* 3325 * Configure HW multi-rate retries. 3326 */ 3327 static int 3328 wpi_mrr_setup(struct wpi_softc *sc) 3329 { 3330 struct ieee80211com *ic = &sc->sc_ic; 3331 struct wpi_mrr_setup mrr; 3332 uint8_t i; 3333 int error; 3334 3335 /* CCK rates (not used with 802.11a). */ 3336 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3337 mrr.rates[i].flags = 0; 3338 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3339 /* Fallback to the immediate lower CCK rate (if any.) */ 3340 mrr.rates[i].next = 3341 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3342 /* Try twice at this rate before falling back to "next". */ 3343 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3344 } 3345 /* OFDM rates (not used with 802.11b). */ 3346 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3347 mrr.rates[i].flags = 0; 3348 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3349 /* Fallback to the immediate lower rate (if any.) */ 3350 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3351 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3352 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3353 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3354 i - 1; 3355 /* Try twice at this rate before falling back to "next". */ 3356 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3357 } 3358 /* Setup MRR for control frames. */ 3359 mrr.which = htole32(WPI_MRR_CTL); 3360 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3361 if (error != 0) { 3362 device_printf(sc->sc_dev, 3363 "could not setup MRR for control frames\n"); 3364 return error; 3365 } 3366 /* Setup MRR for data frames. 
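 * (reuses the same fallback table, selected with WPI_MRR_DATA).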
*/ 3367 mrr.which = htole32(WPI_MRR_DATA); 3368 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3369 if (error != 0) { 3370 device_printf(sc->sc_dev, 3371 "could not setup MRR for data frames\n"); 3372 return error; 3373 } 3374 return 0; 3375 } 3376 3377 static int 3378 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3379 { 3380 struct ieee80211com *ic = ni->ni_ic; 3381 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3382 struct wpi_node *wn = WPI_NODE(ni); 3383 struct wpi_node_info node; 3384 int error; 3385 3386 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3387 3388 if (wn->id == WPI_ID_UNDEFINED) 3389 return EINVAL; 3390 3391 memset(&node, 0, sizeof node); 3392 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3393 node.id = wn->id; 3394 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3395 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3396 node.action = htole32(WPI_ACTION_SET_RATE); 3397 node.antenna = WPI_ANTENNA_BOTH; 3398 3399 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3400 wn->id, ether_sprintf(ni->ni_macaddr)); 3401 3402 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3403 if (error != 0) { 3404 device_printf(sc->sc_dev, 3405 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3406 error); 3407 return error; 3408 } 3409 3410 if (wvp->wv_gtk != 0) { 3411 error = wpi_set_global_keys(ni); 3412 if (error != 0) { 3413 device_printf(sc->sc_dev, 3414 "%s: error while setting global keys\n", __func__); 3415 return ENXIO; 3416 } 3417 } 3418 3419 return 0; 3420 } 3421 3422 /* 3423 * Broadcast node is used to send group-addressed and management frames. 3424 */ 3425 static int 3426 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3427 { 3428 struct ieee80211com *ic = &sc->sc_ic; 3429 struct wpi_node_info node; 3430 3431 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3432 3433 memset(&node, 0, sizeof node); 3434 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); 3435 node.id = WPI_ID_BROADCAST; 3436 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3437 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3438 node.action = htole32(WPI_ACTION_SET_RATE); 3439 node.antenna = WPI_ANTENNA_BOTH; 3440 3441 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3442 3443 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3444 } 3445 3446 static int 3447 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3448 { 3449 struct wpi_node *wn = WPI_NODE(ni); 3450 int error; 3451 3452 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3453 3454 wn->id = wpi_add_node_entry_sta(sc); 3455 3456 if ((error = wpi_add_node(sc, ni)) != 0) { 3457 wpi_del_node_entry(sc, wn->id); 3458 wn->id = WPI_ID_UNDEFINED; 3459 return error; 3460 } 3461 3462 return 0; 3463 } 3464 3465 static int 3466 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3467 { 3468 struct wpi_node *wn = WPI_NODE(ni); 3469 int error; 3470 3471 KASSERT(wn->id == WPI_ID_UNDEFINED, 3472 ("the node %d was added before", wn->id)); 3473 3474 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3475 3476 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3477 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3478 return ENOMEM; 3479 } 3480 3481 if ((error = wpi_add_node(sc, ni)) != 0) { 3482 wpi_del_node_entry(sc, wn->id); 3483 wn->id = WPI_ID_UNDEFINED; 3484 return error; 3485 } 3486 3487 return 0; 3488 } 3489 3490 static void 3491 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3492 { 3493 struct wpi_node *wn = WPI_NODE(ni); 3494 struct wpi_cmd_del_node node; 3495 int error; 3496 3497 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3498 3499 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3500 3501 memset(&node, 0, sizeof node); 3502 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3503 node.count = 1; 3504 3505 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3506 wn->id, ether_sprintf(ni->ni_macaddr)); 3507 3508 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3509 if (error != 0) { 3510 device_printf(sc->sc_dev, 3511 "%s: could not delete node %u, error %d\n", __func__, 3512 wn->id, error); 3513 } 3514 } 3515 3516 static int 3517 wpi_updateedca(struct ieee80211com *ic) 3518 { 3519 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3520 struct wpi_softc *sc = ic->ic_softc; 3521 struct chanAccParams chp; 3522 struct wpi_edca_params cmd; 3523 int aci, error; 3524 3525 ieee80211_wme_ic_getparams(ic, &chp); 3526 3527 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3528 3529 memset(&cmd, 0, sizeof cmd); 3530 cmd.flags = htole32(WPI_EDCA_UPDATE); 3531 for (aci = 0; aci < WME_NUM_AC; aci++) { 3532 const struct wmeParams *ac = &chp.cap_wmeParams[aci]; 3533 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3534 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3535 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3536 cmd.ac[aci].txoplimit = 3537 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3538 3539 DPRINTF(sc, WPI_DEBUG_EDCA, 3540 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3541 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3542 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3543 cmd.ac[aci].txoplimit); 3544 } 3545 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3546 3547 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3548 3549 return error; 3550 #undef WPI_EXP2 3551 } 3552 3553 static void 3554 wpi_set_promisc(struct wpi_softc *sc) 3555 { 3556 struct 
ieee80211com *ic = &sc->sc_ic; 3557 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3558 uint32_t promisc_filter; 3559 3560 promisc_filter = WPI_FILTER_CTL; 3561 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3562 promisc_filter |= WPI_FILTER_PROMISC; 3563 3564 if (ic->ic_promisc > 0) 3565 sc->rxon.filter |= htole32(promisc_filter); 3566 else 3567 sc->rxon.filter &= ~htole32(promisc_filter); 3568 } 3569 3570 static void 3571 wpi_update_promisc(struct ieee80211com *ic) 3572 { 3573 struct wpi_softc *sc = ic->ic_softc; 3574 3575 WPI_LOCK(sc); 3576 if (sc->sc_running == 0) { 3577 WPI_UNLOCK(sc); 3578 return; 3579 } 3580 WPI_UNLOCK(sc); 3581 3582 WPI_RXON_LOCK(sc); 3583 wpi_set_promisc(sc); 3584 3585 if (wpi_send_rxon(sc, 1, 1) != 0) { 3586 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3587 __func__); 3588 } 3589 WPI_RXON_UNLOCK(sc); 3590 } 3591 3592 static void 3593 wpi_update_mcast(struct ieee80211com *ic) 3594 { 3595 /* Ignore */ 3596 } 3597 3598 static void 3599 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3600 { 3601 struct wpi_cmd_led led; 3602 3603 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3604 3605 led.which = which; 3606 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3607 led.off = off; 3608 led.on = on; 3609 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3610 } 3611 3612 static int 3613 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3614 { 3615 struct wpi_cmd_timing cmd; 3616 uint64_t val, mod; 3617 3618 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3619 3620 memset(&cmd, 0, sizeof cmd); 3621 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3622 cmd.bintval = htole16(ni->ni_intval); 3623 cmd.lintval = htole16(10); 3624 3625 /* Compute remaining time until next beacon. */ 3626 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3627 mod = le64toh(cmd.tstamp) % val; 3628 cmd.binitval = htole32((uint32_t)(val - mod)); 3629 3630 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3631 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3632 3633 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3634 } 3635 3636 /* 3637 * This function is called periodically (every 60 seconds) to adjust output 3638 * power to temperature changes. 3639 */ 3640 static void 3641 wpi_power_calibration(struct wpi_softc *sc) 3642 { 3643 int temp; 3644 3645 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3646 3647 /* Update sensor data. */ 3648 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3649 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3650 3651 /* Sanity-check read value. */ 3652 if (temp < -260 || temp > 25) { 3653 /* This can't be correct, ignore. */ 3654 DPRINTF(sc, WPI_DEBUG_TEMP, 3655 "out-of-range temperature reported: %d\n", temp); 3656 return; 3657 } 3658 3659 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3660 3661 /* Adjust Tx power if need be. */ 3662 if (abs(temp - sc->temp) <= 6) 3663 return; 3664 3665 sc->temp = temp; 3666 3667 if (wpi_set_txpower(sc, 1) != 0) { 3668 /* just warn, too bad for the automatic calibration... */ 3669 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3670 } 3671 } 3672 3673 /* 3674 * Set TX power for current channel. 
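 * Per-rate power indices are interpolated from the power group's calibration
 * samples and corrected for the current temperature (see
 * wpi_get_power_index() below).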
3675 */ 3676 static int 3677 wpi_set_txpower(struct wpi_softc *sc, int async) 3678 { 3679 struct wpi_power_group *group; 3680 struct wpi_cmd_txpower cmd; 3681 uint8_t chan; 3682 int idx, is_chan_5ghz, i; 3683 3684 /* Retrieve current channel from last RXON. */ 3685 chan = sc->rxon.chan; 3686 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3687 3688 /* Find the TX power group to which this channel belongs. */ 3689 if (is_chan_5ghz) { 3690 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3691 if (chan <= group->chan) 3692 break; 3693 } else 3694 group = &sc->groups[0]; 3695 3696 memset(&cmd, 0, sizeof cmd); 3697 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3698 cmd.chan = htole16(chan); 3699 3700 /* Set TX power for all OFDM and CCK rates. */ 3701 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3702 /* Retrieve TX power for this channel/rate. */ 3703 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3704 3705 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3706 3707 if (is_chan_5ghz) { 3708 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3709 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3710 } else { 3711 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3712 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3713 } 3714 DPRINTF(sc, WPI_DEBUG_TEMP, 3715 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3716 } 3717 3718 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3719 } 3720 3721 /* 3722 * Determine Tx power index for a given channel/rate combination. 3723 * This takes into account the regulatory information from EEPROM and the 3724 * current temperature. 3725 */ 3726 static int 3727 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3728 uint8_t chan, int is_chan_5ghz, int ridx) 3729 { 3730 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3731 #define fdivround(a, b, n) \ 3732 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3733 3734 /* Linear interpolation. */ 3735 #define interpolate(x, x1, y1, x2, y2, n) \ 3736 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3737 3738 struct wpi_power_sample *sample; 3739 int pwr, idx; 3740 3741 /* Default TX power is group maximum TX power minus 3dB. */ 3742 pwr = group->maxpwr / 2; 3743 3744 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3745 switch (ridx) { 3746 case WPI_RIDX_OFDM36: 3747 pwr -= is_chan_5ghz ? 5 : 0; 3748 break; 3749 case WPI_RIDX_OFDM48: 3750 pwr -= is_chan_5ghz ? 10 : 7; 3751 break; 3752 case WPI_RIDX_OFDM54: 3753 pwr -= is_chan_5ghz ? 12 : 9; 3754 break; 3755 } 3756 3757 /* Never exceed the channel maximum allowed TX power. */ 3758 pwr = min(pwr, sc->maxpwr[chan]); 3759 3760 /* Retrieve TX power index into gain tables from samples. */ 3761 for (sample = group->samples; sample < &group->samples[3]; sample++) 3762 if (pwr > sample[1].power) 3763 break; 3764 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3765 idx = interpolate(pwr, sample[0].power, sample[0].index, 3766 sample[1].power, sample[1].index, 19); 3767 3768 /*- 3769 * Adjust power index based on current temperature: 3770 * - if cooler than factory-calibrated: decrease output power 3771 * - if warmer than factory-calibrated: increase output power 3772 */ 3773 idx -= (sc->temp - group->temp) * 11 / 100; 3774 3775 /* Decrease TX power for CCK rates (-5dB). */ 3776 if (ridx >= WPI_RIDX_CCK1) 3777 idx += 10; 3778 3779 /* Make sure idx stays in a valid range. 
*/ 3780 if (idx < 0) 3781 return 0; 3782 if (idx > WPI_MAX_PWR_INDEX) 3783 return WPI_MAX_PWR_INDEX; 3784 return idx; 3785 3786 #undef interpolate 3787 #undef fdivround 3788 } 3789 3790 /* 3791 * Set STA mode power saving level (between 0 and 5). 3792 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3793 */ 3794 static int 3795 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3796 { 3797 struct wpi_pmgt_cmd cmd; 3798 const struct wpi_pmgt *pmgt; 3799 uint32_t max, reg; 3800 uint8_t skip_dtim; 3801 int i; 3802 3803 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3804 "%s: dtim=%d, level=%d, async=%d\n", 3805 __func__, dtim, level, async); 3806 3807 /* Select which PS parameters to use. */ 3808 if (dtim <= 10) 3809 pmgt = &wpi_pmgt[0][level]; 3810 else 3811 pmgt = &wpi_pmgt[1][level]; 3812 3813 memset(&cmd, 0, sizeof cmd); 3814 if (level != 0) /* not CAM */ 3815 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3816 /* Retrieve PCIe Active State Power Management (ASPM). */ 3817 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 3818 if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */ 3819 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3820 3821 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3822 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3823 3824 if (dtim == 0) { 3825 dtim = 1; 3826 skip_dtim = 0; 3827 } else 3828 skip_dtim = pmgt->skip_dtim; 3829 3830 if (skip_dtim != 0) { 3831 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3832 max = pmgt->intval[4]; 3833 if (max == (uint32_t)-1) 3834 max = dtim * (skip_dtim + 1); 3835 else if (max > dtim) 3836 max = rounddown(max, dtim); 3837 } else 3838 max = dtim; 3839 3840 for (i = 0; i < 5; i++) 3841 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3842 3843 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3844 } 3845 3846 static int 3847 wpi_send_btcoex(struct wpi_softc *sc) 3848 { 3849 struct wpi_bluetooth cmd; 3850 3851 memset(&cmd, 0, sizeof cmd); 3852 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3853 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3854 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3855 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3856 __func__); 3857 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3858 } 3859 3860 static int 3861 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3862 { 3863 int error; 3864 3865 if (async) 3866 WPI_RXON_LOCK_ASSERT(sc); 3867 3868 if (assoc && wpi_check_bss_filter(sc) != 0) { 3869 struct wpi_assoc rxon_assoc; 3870 3871 rxon_assoc.flags = sc->rxon.flags; 3872 rxon_assoc.filter = sc->rxon.filter; 3873 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3874 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3875 rxon_assoc.reserved = 0; 3876 3877 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3878 sizeof (struct wpi_assoc), async); 3879 if (error != 0) { 3880 device_printf(sc->sc_dev, 3881 "RXON_ASSOC command failed, error %d\n", error); 3882 return error; 3883 } 3884 } else { 3885 if (async) { 3886 WPI_NT_LOCK(sc); 3887 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3888 sizeof (struct wpi_rxon), async); 3889 if (error == 0) 3890 wpi_clear_node_table(sc); 3891 WPI_NT_UNLOCK(sc); 3892 } else { 3893 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3894 sizeof (struct wpi_rxon), async); 3895 if (error == 0) 3896 wpi_clear_node_table(sc); 3897 } 3898 3899 if (error != 0) { 3900 device_printf(sc->sc_dev, 3901 "RXON command failed, error %d\n", error); 3902 return error; 3903 } 3904 3905 /* Add 
broadcast node. */
3906 error = wpi_add_broadcast_node(sc, async);
3907 if (error != 0) {
3908 device_printf(sc->sc_dev,
3909 "could not add broadcast node, error %d\n", error);
3910 return error;
3911 }
3912 }
3913
3914 /* Configuration has changed, set Tx power accordingly. */
3915 if ((error = wpi_set_txpower(sc, async)) != 0) {
3916 device_printf(sc->sc_dev,
3917 "%s: could not set TX power, error %d\n", __func__, error);
3918 return error;
3919 }
3920
3921 return 0;
3922 }
3923
3924 /**
3925 * Configure the card to listen on a particular channel; this transitions the
3926 * card into a state in which it can receive frames from remote devices.
3927 */
3928 static int
3929 wpi_config(struct wpi_softc *sc)
3930 {
3931 struct ieee80211com *ic = &sc->sc_ic;
3932 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3933 struct ieee80211_channel *c = ic->ic_curchan;
3934 int error;
3935
3936 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3937
3938 /* Set power saving level to CAM during initialization. */
3939 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
3940 device_printf(sc->sc_dev,
3941 "%s: could not set power saving level\n", __func__);
3942 return error;
3943 }
3944
3945 /* Configure bluetooth coexistence. */
3946 if ((error = wpi_send_btcoex(sc)) != 0) {
3947 device_printf(sc->sc_dev,
3948 "could not configure bluetooth coexistence\n");
3949 return error;
3950 }
3951
3952 /* Configure adapter. */
3953 memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
3954 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
3955
3956 /* Set default channel. */
3957 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
3958 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
3959 if (IEEE80211_IS_CHAN_2GHZ(c))
3960 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
3961
3962 sc->rxon.filter = WPI_FILTER_MULTICAST;
3963 switch (ic->ic_opmode) {
3964 case IEEE80211_M_STA:
3965 sc->rxon.mode = WPI_MODE_STA;
3966 break;
3967 case IEEE80211_M_IBSS:
3968 sc->rxon.mode = WPI_MODE_IBSS;
3969 sc->rxon.filter |= WPI_FILTER_BEACON;
3970 break;
3971 case IEEE80211_M_HOSTAP:
3972 /* XXX workaround for beaconing */
3973 sc->rxon.mode = WPI_MODE_IBSS;
3974 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
3975 break;
3976 case IEEE80211_M_AHDEMO:
3977 sc->rxon.mode = WPI_MODE_HOSTAP;
3978 break;
3979 case IEEE80211_M_MONITOR:
3980 sc->rxon.mode = WPI_MODE_MONITOR;
3981 break;
3982 default:
3983 device_printf(sc->sc_dev, "unknown opmode %d\n",
3984 ic->ic_opmode);
3985 return EINVAL;
3986 }
3987 sc->rxon.filter = htole32(sc->rxon.filter);
3988 wpi_set_promisc(sc);
3989 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
3990 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
3991
3992 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
3993 device_printf(sc->sc_dev, "%s: could not send RXON\n",
3994 __func__);
3995 return error;
3996 }
3997
3998 /* Set up rate scaling. */
3999 if ((error = wpi_mrr_setup(sc)) != 0) {
4000 device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
4001 error);
4002 return error;
4003 }
4004
4005 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4006
4007 return 0;
4008 }
4009
4010 static uint16_t
4011 wpi_get_active_dwell_time(struct wpi_softc *sc,
4012 struct ieee80211_channel *c, uint8_t n_probes)
4013 {
4014 /* No channel? Default to 2GHz settings. */
4015 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
4016 return (WPI_ACTIVE_DWELL_TIME_2GHZ +
4017 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
4018 }
4019
4020 /* 5GHz dwell time.
*/ 4021 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4022 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4023 } 4024 4025 /* 4026 * Limit the total dwell time. 4027 * 4028 * Returns the dwell time in milliseconds. 4029 */ 4030 static uint16_t 4031 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4032 { 4033 struct ieee80211com *ic = &sc->sc_ic; 4034 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4035 uint16_t bintval = 0; 4036 4037 /* bintval is in TU (1.024mS) */ 4038 if (vap != NULL) 4039 bintval = vap->iv_bss->ni_intval; 4040 4041 /* 4042 * If it's non-zero, we should calculate the minimum of 4043 * it and the DWELL_BASE. 4044 * 4045 * XXX Yes, the math should take into account that bintval 4046 * is 1.024mS, not 1mS.. 4047 */ 4048 if (bintval > 0) { 4049 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4050 bintval); 4051 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4052 } 4053 4054 /* No association context? Default. */ 4055 return dwell_time; 4056 } 4057 4058 static uint16_t 4059 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4060 { 4061 uint16_t passive; 4062 4063 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4064 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4065 else 4066 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4067 4068 /* Clamp to the beacon interval if we're associated. */ 4069 return (wpi_limit_dwell(sc, passive)); 4070 } 4071 4072 static uint32_t 4073 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4074 { 4075 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4076 uint32_t nbeacons = time / bintval; 4077 4078 if (mod > WPI_PAUSE_MAX_TIME) 4079 mod = WPI_PAUSE_MAX_TIME; 4080 4081 return WPI_PAUSE_SCAN(nbeacons, mod); 4082 } 4083 4084 /* 4085 * Send a scan request to the firmware. 4086 */ 4087 static int 4088 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4089 { 4090 struct ieee80211com *ic = &sc->sc_ic; 4091 struct ieee80211_scan_state *ss = ic->ic_scan; 4092 struct ieee80211vap *vap = ss->ss_vap; 4093 struct wpi_scan_hdr *hdr; 4094 struct wpi_cmd_data *tx; 4095 struct wpi_scan_essid *essids; 4096 struct wpi_scan_chan *chan; 4097 struct ieee80211_frame *wh; 4098 struct ieee80211_rateset *rs; 4099 uint16_t bintval, buflen, dwell_active, dwell_passive; 4100 uint8_t *buf, *frm, i, nssid; 4101 int bgscan, error; 4102 4103 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4104 4105 /* 4106 * We are absolutely not allowed to send a scan command when another 4107 * scan command is pending. 4108 */ 4109 if (callout_pending(&sc->scan_timeout)) { 4110 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4111 __func__); 4112 error = EAGAIN; 4113 goto fail; 4114 } 4115 4116 bgscan = wpi_check_bss_filter(sc); 4117 bintval = vap->iv_bss->ni_intval; 4118 if (bgscan != 0 && 4119 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4120 error = EOPNOTSUPP; 4121 goto fail; 4122 } 4123 4124 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4125 if (buf == NULL) { 4126 device_printf(sc->sc_dev, 4127 "%s: could not allocate buffer for scan command\n", 4128 __func__); 4129 error = ENOMEM; 4130 goto fail; 4131 } 4132 hdr = (struct wpi_scan_hdr *)buf; 4133 4134 /* 4135 * Move to the next channel if no packets are received within 10 msecs 4136 * after sending the probe request. 
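 * As far as we understand the firmware interface, quiet_threshold is the
 * number of frames that must be received within quiet_time for the channel
 * to be considered occupied; with a threshold of 1, a single received frame
 * is enough to make the firmware stay for the full dwell time instead of
 * moving on early.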
4137 */ 4138 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4139 hdr->quiet_threshold = htole16(1); 4140 4141 if (bgscan != 0) { 4142 /* 4143 * Max needs to be greater than active and passive and quiet! 4144 * It's also in microseconds! 4145 */ 4146 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4147 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4148 bintval)); 4149 } 4150 4151 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4152 4153 tx = (struct wpi_cmd_data *)(hdr + 1); 4154 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4155 tx->id = WPI_ID_BROADCAST; 4156 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4157 4158 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4159 /* Send probe requests at 6Mbps. */ 4160 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4161 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4162 } else { 4163 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4164 /* Send probe requests at 1Mbps. */ 4165 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4166 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4167 } 4168 4169 essids = (struct wpi_scan_essid *)(tx + 1); 4170 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4171 for (i = 0; i < nssid; i++) { 4172 essids[i].id = IEEE80211_ELEMID_SSID; 4173 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4174 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4175 #ifdef WPI_DEBUG 4176 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4177 printf("Scanning Essid: "); 4178 ieee80211_print_essid(essids[i].data, essids[i].len); 4179 printf("\n"); 4180 } 4181 #endif 4182 } 4183 4184 /* 4185 * Build a probe request frame. Most of the following code is a 4186 * copy & paste of what is done in net80211. 4187 */ 4188 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4189 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4190 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4191 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4192 IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); 4193 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4194 IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); 4195 4196 frm = (uint8_t *)(wh + 1); 4197 frm = ieee80211_add_ssid(frm, NULL, 0); 4198 frm = ieee80211_add_rates(frm, rs); 4199 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4200 frm = ieee80211_add_xrates(frm, rs); 4201 4202 /* Set length of probe request. */ 4203 tx->len = htole16(frm - (uint8_t *)wh); 4204 4205 /* 4206 * Construct information about the channel that we 4207 * want to scan. The firmware expects this to be directly 4208 * after the scan probe request 4209 */ 4210 chan = (struct wpi_scan_chan *)frm; 4211 chan->chan = ieee80211_chan2ieee(ic, c); 4212 chan->flags = 0; 4213 if (nssid) { 4214 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4215 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4216 } else 4217 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4218 4219 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4220 chan->flags |= WPI_CHAN_ACTIVE; 4221 4222 /* 4223 * Calculate the active/passive dwell times. 4224 */ 4225 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4226 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4227 4228 /* Make sure they're valid. 
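 * The active dwell must not exceed the passive dwell, so clamp it here if
 * the calculations above produced an inverted pair.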
*/ 4229 if (dwell_active > dwell_passive) 4230 dwell_active = dwell_passive; 4231 4232 chan->active = htole16(dwell_active); 4233 chan->passive = htole16(dwell_passive); 4234 4235 chan->dsp_gain = 0x6e; /* Default level */ 4236 4237 if (IEEE80211_IS_CHAN_5GHZ(c)) 4238 chan->rf_gain = 0x3b; 4239 else 4240 chan->rf_gain = 0x28; 4241 4242 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4243 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4244 4245 hdr->nchan++; 4246 4247 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4248 /* XXX Force probe request transmission. */ 4249 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4250 4251 chan++; 4252 4253 /* Reduce unnecessary delay. */ 4254 chan->flags = 0; 4255 chan->passive = chan->active = hdr->quiet_time; 4256 4257 hdr->nchan++; 4258 } 4259 4260 chan++; 4261 4262 buflen = (uint8_t *)chan - buf; 4263 hdr->len = htole16(buflen); 4264 4265 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4266 hdr->nchan); 4267 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4268 free(buf, M_DEVBUF); 4269 4270 if (error != 0) 4271 goto fail; 4272 4273 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4274 4275 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4276 4277 return 0; 4278 4279 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4280 4281 return error; 4282 } 4283 4284 static int 4285 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4286 { 4287 struct ieee80211com *ic = vap->iv_ic; 4288 struct ieee80211_node *ni = vap->iv_bss; 4289 struct ieee80211_channel *c = ni->ni_chan; 4290 int error; 4291 4292 WPI_RXON_LOCK(sc); 4293 4294 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4295 4296 /* Update adapter configuration. */ 4297 sc->rxon.associd = 0; 4298 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4299 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4300 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4301 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4302 if (IEEE80211_IS_CHAN_2GHZ(c)) 4303 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4304 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4305 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4306 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4307 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4308 if (IEEE80211_IS_CHAN_A(c)) { 4309 sc->rxon.cck_mask = 0; 4310 sc->rxon.ofdm_mask = 0x15; 4311 } else if (IEEE80211_IS_CHAN_B(c)) { 4312 sc->rxon.cck_mask = 0x03; 4313 sc->rxon.ofdm_mask = 0; 4314 } else { 4315 /* Assume 802.11b/g. 
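 * 0x0f enables all four CCK rates (1, 2, 5.5 and 11 Mbps); 0x15 is
 * believed to select the mandatory OFDM rates (6, 12 and 24 Mbps),
 * following the driver's OFDM rate-index ordering.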
*/ 4316 sc->rxon.cck_mask = 0x0f; 4317 sc->rxon.ofdm_mask = 0x15; 4318 } 4319 4320 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4321 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4322 sc->rxon.ofdm_mask); 4323 4324 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4325 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4326 __func__); 4327 } 4328 4329 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4330 4331 WPI_RXON_UNLOCK(sc); 4332 4333 return error; 4334 } 4335 4336 static int 4337 wpi_config_beacon(struct wpi_vap *wvp) 4338 { 4339 struct ieee80211vap *vap = &wvp->wv_vap; 4340 struct ieee80211com *ic = vap->iv_ic; 4341 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4342 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4343 struct wpi_softc *sc = ic->ic_softc; 4344 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4345 struct ieee80211_tim_ie *tie; 4346 struct mbuf *m; 4347 uint8_t *ptr; 4348 int error; 4349 4350 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4351 4352 WPI_VAP_LOCK_ASSERT(wvp); 4353 4354 cmd->len = htole16(bcn->m->m_pkthdr.len); 4355 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4356 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4357 4358 /* XXX seems to be unused */ 4359 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4360 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4361 ptr = mtod(bcn->m, uint8_t *); 4362 4363 cmd->tim = htole16(bo->bo_tim - ptr); 4364 cmd->timsz = tie->tim_len; 4365 } 4366 4367 /* Necessary for recursion in ieee80211_beacon_update(). */ 4368 m = bcn->m; 4369 bcn->m = m_dup(m, M_NOWAIT); 4370 if (bcn->m == NULL) { 4371 device_printf(sc->sc_dev, 4372 "%s: could not copy beacon frame\n", __func__); 4373 error = ENOMEM; 4374 goto end; 4375 } 4376 4377 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4378 device_printf(sc->sc_dev, 4379 "%s: could not update beacon frame, error %d", __func__, 4380 error); 4381 m_freem(bcn->m); 4382 } 4383 4384 /* Restore mbuf. 
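 * The duplicate handed to wpi_cmd2() is owned by the Tx path from here on
 * (or was freed above on error); restore the original beacon so that later
 * ieee80211_beacon_update() calls keep operating on it.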
*/ 4385 end: bcn->m = m; 4386 4387 return error; 4388 } 4389 4390 static int 4391 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4392 { 4393 struct ieee80211vap *vap = ni->ni_vap; 4394 struct wpi_vap *wvp = WPI_VAP(vap); 4395 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4396 struct mbuf *m; 4397 int error; 4398 4399 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4400 4401 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4402 return EINVAL; 4403 4404 m = ieee80211_beacon_alloc(ni); 4405 if (m == NULL) { 4406 device_printf(sc->sc_dev, 4407 "%s: could not allocate beacon frame\n", __func__); 4408 return ENOMEM; 4409 } 4410 4411 WPI_VAP_LOCK(wvp); 4412 if (bcn->m != NULL) 4413 m_freem(bcn->m); 4414 4415 bcn->m = m; 4416 4417 error = wpi_config_beacon(wvp); 4418 WPI_VAP_UNLOCK(wvp); 4419 4420 return error; 4421 } 4422 4423 static void 4424 wpi_update_beacon(struct ieee80211vap *vap, int item) 4425 { 4426 struct wpi_softc *sc = vap->iv_ic->ic_softc; 4427 struct wpi_vap *wvp = WPI_VAP(vap); 4428 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4429 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; 4430 struct ieee80211_node *ni = vap->iv_bss; 4431 int mcast = 0; 4432 4433 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4434 4435 WPI_VAP_LOCK(wvp); 4436 if (bcn->m == NULL) { 4437 bcn->m = ieee80211_beacon_alloc(ni); 4438 if (bcn->m == NULL) { 4439 device_printf(sc->sc_dev, 4440 "%s: could not allocate beacon frame\n", __func__); 4441 4442 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4443 __func__); 4444 4445 WPI_VAP_UNLOCK(wvp); 4446 return; 4447 } 4448 } 4449 WPI_VAP_UNLOCK(wvp); 4450 4451 if (item == IEEE80211_BEACON_TIM) 4452 mcast = 1; /* TODO */ 4453 4454 setbit(bo->bo_flags, item); 4455 ieee80211_beacon_update(ni, bcn->m, mcast); 4456 4457 WPI_VAP_LOCK(wvp); 4458 wpi_config_beacon(wvp); 4459 WPI_VAP_UNLOCK(wvp); 4460 4461 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4462 } 4463 4464 static void 4465 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4466 { 4467 struct ieee80211vap *vap = ni->ni_vap; 4468 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4469 struct wpi_node *wn = WPI_NODE(ni); 4470 int error; 4471 4472 WPI_NT_LOCK(sc); 4473 4474 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4475 4476 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4477 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4478 device_printf(sc->sc_dev, 4479 "%s: could not add IBSS node, error %d\n", 4480 __func__, error); 4481 } 4482 } 4483 WPI_NT_UNLOCK(sc); 4484 } 4485 4486 static int 4487 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4488 { 4489 struct ieee80211com *ic = vap->iv_ic; 4490 struct ieee80211_node *ni = vap->iv_bss; 4491 struct ieee80211_channel *c = ni->ni_chan; 4492 int error; 4493 4494 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4495 4496 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4497 /* Link LED blinks while monitoring. */ 4498 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4499 return 0; 4500 } 4501 4502 /* XXX kernel panic workaround */ 4503 if (c == IEEE80211_CHAN_ANYC) { 4504 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4505 __func__); 4506 return EINVAL; 4507 } 4508 4509 if ((error = wpi_set_timing(sc, ni)) != 0) { 4510 device_printf(sc->sc_dev, 4511 "%s: could not set timing, error %d\n", __func__, error); 4512 return error; 4513 } 4514 4515 /* Update adapter configuration. 
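 * This mirrors the RXON setup done in wpi_auth(), except that the
 * association ID is now known and the BSS filter bit is set below so the
 * firmware starts accepting frames from our BSS.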
*/ 4516 WPI_RXON_LOCK(sc); 4517 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4518 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4519 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4520 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4521 if (IEEE80211_IS_CHAN_2GHZ(c)) 4522 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4523 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4524 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4525 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4526 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4527 if (IEEE80211_IS_CHAN_A(c)) { 4528 sc->rxon.cck_mask = 0; 4529 sc->rxon.ofdm_mask = 0x15; 4530 } else if (IEEE80211_IS_CHAN_B(c)) { 4531 sc->rxon.cck_mask = 0x03; 4532 sc->rxon.ofdm_mask = 0; 4533 } else { 4534 /* Assume 802.11b/g. */ 4535 sc->rxon.cck_mask = 0x0f; 4536 sc->rxon.ofdm_mask = 0x15; 4537 } 4538 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4539 4540 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4541 sc->rxon.chan, sc->rxon.flags); 4542 4543 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4544 WPI_RXON_UNLOCK(sc); 4545 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4546 __func__); 4547 return error; 4548 } 4549 4550 /* Start periodic calibration timer. */ 4551 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4552 4553 WPI_RXON_UNLOCK(sc); 4554 4555 if (vap->iv_opmode == IEEE80211_M_IBSS || 4556 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4557 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4558 device_printf(sc->sc_dev, 4559 "%s: could not setup beacon, error %d\n", __func__, 4560 error); 4561 return error; 4562 } 4563 } 4564 4565 if (vap->iv_opmode == IEEE80211_M_STA) { 4566 /* Add BSS node. */ 4567 WPI_NT_LOCK(sc); 4568 error = wpi_add_sta_node(sc, ni); 4569 WPI_NT_UNLOCK(sc); 4570 if (error != 0) { 4571 device_printf(sc->sc_dev, 4572 "%s: could not add BSS node, error %d\n", __func__, 4573 error); 4574 return error; 4575 } 4576 } 4577 4578 /* Link LED always on while associated. */ 4579 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4580 4581 /* Enable power-saving mode if requested by user. 
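 * Level 3 is a middle setting on the 0 (CAM) to 5 (maximum saving) scale
 * implemented by wpi_set_pslevel().  IBSS mode is excluded, presumably
 * because an IBSS station has to stay awake to take its turn at beaconing.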
*/ 4582 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4583 vap->iv_opmode != IEEE80211_M_IBSS) 4584 (void)wpi_set_pslevel(sc, 0, 3, 1); 4585 4586 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4587 4588 return 0; 4589 } 4590 4591 static int 4592 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4593 { 4594 const struct ieee80211_cipher *cip = k->wk_cipher; 4595 struct ieee80211vap *vap = ni->ni_vap; 4596 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4597 struct wpi_node *wn = WPI_NODE(ni); 4598 struct wpi_node_info node; 4599 uint16_t kflags; 4600 int error; 4601 4602 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4603 4604 if (wpi_check_node_entry(sc, wn->id) == 0) { 4605 device_printf(sc->sc_dev, "%s: node does not exist\n", 4606 __func__); 4607 return 0; 4608 } 4609 4610 switch (cip->ic_cipher) { 4611 case IEEE80211_CIPHER_AES_CCM: 4612 kflags = WPI_KFLAG_CCMP; 4613 break; 4614 4615 default: 4616 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4617 cip->ic_cipher); 4618 return 0; 4619 } 4620 4621 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4622 if (k->wk_flags & IEEE80211_KEY_GROUP) 4623 kflags |= WPI_KFLAG_MULTICAST; 4624 4625 memset(&node, 0, sizeof node); 4626 node.id = wn->id; 4627 node.control = WPI_NODE_UPDATE; 4628 node.flags = WPI_FLAG_KEY_SET; 4629 node.kflags = htole16(kflags); 4630 memcpy(node.key, k->wk_key, k->wk_keylen); 4631 again: 4632 DPRINTF(sc, WPI_DEBUG_KEY, 4633 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4634 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4635 node.id, ether_sprintf(ni->ni_macaddr)); 4636 4637 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4638 if (error != 0) { 4639 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4640 error); 4641 return !error; 4642 } 4643 4644 if (!(kflags & WPI_KFLAG_MULTICAST) && 4645 ieee80211_is_key_global(vap, k)) { 4646 kflags |= WPI_KFLAG_MULTICAST; 4647 node.kflags = htole16(kflags); 4648 4649 goto again; 4650 } 4651 4652 return 1; 4653 } 4654 4655 static void 4656 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4657 { 4658 const struct ieee80211_key *k = arg; 4659 struct ieee80211vap *vap = ni->ni_vap; 4660 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4661 struct wpi_node *wn = WPI_NODE(ni); 4662 int error; 4663 4664 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4665 return; 4666 4667 WPI_NT_LOCK(sc); 4668 error = wpi_load_key(ni, k); 4669 WPI_NT_UNLOCK(sc); 4670 4671 if (error == 0) { 4672 device_printf(sc->sc_dev, "%s: error while setting key\n", 4673 __func__); 4674 } 4675 } 4676 4677 static int 4678 wpi_set_global_keys(struct ieee80211_node *ni) 4679 { 4680 struct ieee80211vap *vap = ni->ni_vap; 4681 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4682 int error = 1; 4683 4684 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4685 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4686 error = wpi_load_key(ni, wk); 4687 4688 return !error; 4689 } 4690 4691 static int 4692 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4693 { 4694 struct ieee80211vap *vap = ni->ni_vap; 4695 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4696 struct wpi_node *wn = WPI_NODE(ni); 4697 struct wpi_node_info node; 4698 uint16_t kflags; 4699 int error; 4700 4701 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4702 4703 if (wpi_check_node_entry(sc, wn->id) == 0) { 4704 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4705 return 1; /* Nothing to do. 
*/ 4706 } 4707 4708 kflags = WPI_KFLAG_KID(k->wk_keyix); 4709 if (k->wk_flags & IEEE80211_KEY_GROUP) 4710 kflags |= WPI_KFLAG_MULTICAST; 4711 4712 memset(&node, 0, sizeof node); 4713 node.id = wn->id; 4714 node.control = WPI_NODE_UPDATE; 4715 node.flags = WPI_FLAG_KEY_SET; 4716 node.kflags = htole16(kflags); 4717 again: 4718 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4719 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4720 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4721 4722 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4723 if (error != 0) { 4724 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4725 error); 4726 return !error; 4727 } 4728 4729 if (!(kflags & WPI_KFLAG_MULTICAST) && 4730 ieee80211_is_key_global(vap, k)) { 4731 kflags |= WPI_KFLAG_MULTICAST; 4732 node.kflags = htole16(kflags); 4733 4734 goto again; 4735 } 4736 4737 return 1; 4738 } 4739 4740 static void 4741 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4742 { 4743 const struct ieee80211_key *k = arg; 4744 struct ieee80211vap *vap = ni->ni_vap; 4745 struct wpi_softc *sc = ni->ni_ic->ic_softc; 4746 struct wpi_node *wn = WPI_NODE(ni); 4747 int error; 4748 4749 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4750 return; 4751 4752 WPI_NT_LOCK(sc); 4753 error = wpi_del_key(ni, k); 4754 WPI_NT_UNLOCK(sc); 4755 4756 if (error == 0) { 4757 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4758 __func__); 4759 } 4760 } 4761 4762 static int 4763 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4764 int set) 4765 { 4766 struct ieee80211com *ic = vap->iv_ic; 4767 struct wpi_softc *sc = ic->ic_softc; 4768 struct wpi_vap *wvp = WPI_VAP(vap); 4769 struct ieee80211_node *ni; 4770 int error, ni_ref = 0; 4771 4772 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4773 4774 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4775 /* Not for us. */ 4776 return 1; 4777 } 4778 4779 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4780 /* XMIT keys are handled in wpi_tx_data(). */ 4781 return 1; 4782 } 4783 4784 /* Handle group keys. */ 4785 if (ieee80211_is_key_global(vap, k)) { 4786 WPI_NT_LOCK(sc); 4787 if (set) 4788 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4789 else 4790 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4791 WPI_NT_UNLOCK(sc); 4792 4793 if (vap->iv_state == IEEE80211_S_RUN) { 4794 ieee80211_iterate_nodes(&ic->ic_sta, 4795 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4796 __DECONST(void *, k)); 4797 } 4798 4799 return 1; 4800 } 4801 4802 switch (vap->iv_opmode) { 4803 case IEEE80211_M_STA: 4804 ni = vap->iv_bss; 4805 break; 4806 4807 case IEEE80211_M_IBSS: 4808 case IEEE80211_M_AHDEMO: 4809 case IEEE80211_M_HOSTAP: 4810 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4811 if (ni == NULL) 4812 return 0; /* should not happen */ 4813 4814 ni_ref = 1; 4815 break; 4816 4817 default: 4818 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4819 vap->iv_opmode); 4820 return 0; 4821 } 4822 4823 WPI_NT_LOCK(sc); 4824 if (set) 4825 error = wpi_load_key(ni, k); 4826 else 4827 error = wpi_del_key(ni, k); 4828 WPI_NT_UNLOCK(sc); 4829 4830 if (ni_ref) 4831 ieee80211_node_decref(ni); 4832 4833 return error; 4834 } 4835 4836 static int 4837 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) 4838 { 4839 return wpi_process_key(vap, k, 1); 4840 } 4841 4842 static int 4843 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4844 { 4845 return wpi_process_key(vap, k, 0); 4846 } 4847 4848 /* 4849 * This function is called after the runtime firmware notifies us of its 4850 * readiness (called in a process context). 4851 */ 4852 static int 4853 wpi_post_alive(struct wpi_softc *sc) 4854 { 4855 int ntries, error; 4856 4857 /* Check (again) that the radio is not disabled. */ 4858 if ((error = wpi_nic_lock(sc)) != 0) 4859 return error; 4860 4861 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4862 4863 /* NB: Runtime firmware must be up and running. */ 4864 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4865 device_printf(sc->sc_dev, 4866 "RF switch: radio disabled (%s)\n", __func__); 4867 wpi_nic_unlock(sc); 4868 return EPERM; /* :-) */ 4869 } 4870 wpi_nic_unlock(sc); 4871 4872 /* Wait for thermal sensor to calibrate. */ 4873 for (ntries = 0; ntries < 1000; ntries++) { 4874 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4875 break; 4876 DELAY(10); 4877 } 4878 4879 if (ntries == 1000) { 4880 device_printf(sc->sc_dev, 4881 "timeout waiting for thermal sensor calibration\n"); 4882 return ETIMEDOUT; 4883 } 4884 4885 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4886 return 0; 4887 } 4888 4889 /* 4890 * The firmware boot code is small and is intended to be copied directly into 4891 * the NIC internal memory (no DMA transfer). 4892 */ 4893 static int 4894 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size) 4895 { 4896 int error, ntries; 4897 4898 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4899 4900 size /= sizeof (uint32_t); 4901 4902 if ((error = wpi_nic_lock(sc)) != 0) 4903 return error; 4904 4905 /* Copy microcode image into NIC memory. */ 4906 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4907 (const uint32_t *)ucode, size); 4908 4909 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4910 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4911 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4912 4913 /* Start boot load now. */ 4914 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4915 4916 /* Wait for transfer to complete. */ 4917 for (ntries = 0; ntries < 1000; ntries++) { 4918 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4919 DPRINTF(sc, WPI_DEBUG_HW, 4920 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4921 WPI_FH_TX_STATUS_IDLE(6), 4922 status & WPI_FH_TX_STATUS_IDLE(6)); 4923 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4924 DPRINTF(sc, WPI_DEBUG_HW, 4925 "Status Match! 
- ntries = %d\n", ntries); 4926 break; 4927 } 4928 DELAY(10); 4929 } 4930 if (ntries == 1000) { 4931 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4932 __func__); 4933 wpi_nic_unlock(sc); 4934 return ETIMEDOUT; 4935 } 4936 4937 /* Enable boot after power up. */ 4938 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4939 4940 wpi_nic_unlock(sc); 4941 return 0; 4942 } 4943 4944 static int 4945 wpi_load_firmware(struct wpi_softc *sc) 4946 { 4947 struct wpi_fw_info *fw = &sc->fw; 4948 struct wpi_dma_info *dma = &sc->fw_dma; 4949 int error; 4950 4951 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4952 4953 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4954 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4955 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4956 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4957 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4958 4959 /* Tell adapter where to find initialization sections. */ 4960 if ((error = wpi_nic_lock(sc)) != 0) 4961 return error; 4962 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4963 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4964 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4965 dma->paddr + WPI_FW_DATA_MAXSZ); 4966 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4967 wpi_nic_unlock(sc); 4968 4969 /* Load firmware boot code. */ 4970 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4971 if (error != 0) { 4972 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4973 __func__); 4974 return error; 4975 } 4976 4977 /* Now press "execute". */ 4978 WPI_WRITE(sc, WPI_RESET, 0); 4979 4980 /* Wait at most one second for first alive notification. */ 4981 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4982 device_printf(sc->sc_dev, 4983 "%s: timeout waiting for adapter to initialize, error %d\n", 4984 __func__, error); 4985 return error; 4986 } 4987 4988 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4989 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4990 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4991 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4992 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4993 4994 /* Tell adapter where to find runtime sections. 
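 * Same BSM DRAM pointers as for the init image above.  WPI_FW_UPDATED
 * or'ed into the text size is understood to tell the bootstrap state
 * machine to reload the runtime image (rather than the init image) on
 * subsequent wakeups/resets.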
*/ 4995 if ((error = wpi_nic_lock(sc)) != 0) 4996 return error; 4997 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4998 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4999 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 5000 dma->paddr + WPI_FW_DATA_MAXSZ); 5001 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 5002 WPI_FW_UPDATED | fw->main.textsz); 5003 wpi_nic_unlock(sc); 5004 5005 return 0; 5006 } 5007 5008 static int 5009 wpi_read_firmware(struct wpi_softc *sc) 5010 { 5011 const struct firmware *fp; 5012 struct wpi_fw_info *fw = &sc->fw; 5013 const struct wpi_firmware_hdr *hdr; 5014 int error; 5015 5016 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5017 5018 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5019 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5020 5021 WPI_UNLOCK(sc); 5022 fp = firmware_get(WPI_FW_NAME); 5023 WPI_LOCK(sc); 5024 5025 if (fp == NULL) { 5026 device_printf(sc->sc_dev, 5027 "could not load firmware image '%s'\n", WPI_FW_NAME); 5028 return EINVAL; 5029 } 5030 5031 sc->fw_fp = fp; 5032 5033 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5034 device_printf(sc->sc_dev, 5035 "firmware file too short: %zu bytes\n", fp->datasize); 5036 error = EINVAL; 5037 goto fail; 5038 } 5039 5040 fw->size = fp->datasize; 5041 fw->data = (const uint8_t *)fp->data; 5042 5043 /* Extract firmware header information. */ 5044 hdr = (const struct wpi_firmware_hdr *)fw->data; 5045 5046 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5047 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5048 5049 fw->main.textsz = le32toh(hdr->rtextsz); 5050 fw->main.datasz = le32toh(hdr->rdatasz); 5051 fw->init.textsz = le32toh(hdr->itextsz); 5052 fw->init.datasz = le32toh(hdr->idatasz); 5053 fw->boot.textsz = le32toh(hdr->btextsz); 5054 fw->boot.datasz = 0; 5055 5056 /* Sanity-check firmware header. */ 5057 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5058 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5059 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5060 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5061 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5062 (fw->boot.textsz & 3) != 0) { 5063 device_printf(sc->sc_dev, "invalid firmware header\n"); 5064 error = EINVAL; 5065 goto fail; 5066 } 5067 5068 /* Check that all firmware sections fit. */ 5069 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5070 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5071 device_printf(sc->sc_dev, 5072 "firmware file too short: %zu bytes\n", fw->size); 5073 error = EINVAL; 5074 goto fail; 5075 } 5076 5077 /* Get pointers to firmware sections. 
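 * The sections are laid out back to back right after the header, in the
 * order shown in the diagram above: runtime text, runtime data, init text,
 * init data and finally boot text.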
*/ 5078 fw->main.text = (const uint8_t *)(hdr + 1); 5079 fw->main.data = fw->main.text + fw->main.textsz; 5080 fw->init.text = fw->main.data + fw->main.datasz; 5081 fw->init.data = fw->init.text + fw->init.textsz; 5082 fw->boot.text = fw->init.data + fw->init.datasz; 5083 5084 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5085 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5086 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5087 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5088 fw->main.textsz, fw->main.datasz, 5089 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5090 5091 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5092 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5093 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5094 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5095 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5096 5097 return 0; 5098 5099 fail: wpi_unload_firmware(sc); 5100 return error; 5101 } 5102 5103 /** 5104 * Free the referenced firmware image 5105 */ 5106 static void 5107 wpi_unload_firmware(struct wpi_softc *sc) 5108 { 5109 if (sc->fw_fp != NULL) { 5110 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5111 sc->fw_fp = NULL; 5112 } 5113 } 5114 5115 static int 5116 wpi_clock_wait(struct wpi_softc *sc) 5117 { 5118 int ntries; 5119 5120 /* Set "initialization complete" bit. */ 5121 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5122 5123 /* Wait for clock stabilization. */ 5124 for (ntries = 0; ntries < 2500; ntries++) { 5125 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5126 return 0; 5127 DELAY(100); 5128 } 5129 device_printf(sc->sc_dev, 5130 "%s: timeout waiting for clock stabilization\n", __func__); 5131 5132 return ETIMEDOUT; 5133 } 5134 5135 static int 5136 wpi_apm_init(struct wpi_softc *sc) 5137 { 5138 uint32_t reg; 5139 int error; 5140 5141 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5142 5143 /* Disable L0s exit timer (NMI bug workaround). */ 5144 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5145 /* Don't wait for ICH L0s (ICH bug workaround). */ 5146 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5147 5148 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5149 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5150 5151 /* Retrieve PCIe Active State Power Management (ASPM). */ 5152 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1); 5153 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5154 if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ 5155 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5156 else 5157 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5158 5159 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5160 5161 /* Wait for clock stabilization before accessing prph. */ 5162 if ((error = wpi_clock_wait(sc)) != 0) 5163 return error; 5164 5165 if ((error = wpi_nic_lock(sc)) != 0) 5166 return error; 5167 /* Cleanup. */ 5168 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5169 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5170 5171 /* Enable DMA and BSM (Bootstrap State Machine). */ 5172 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5173 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5174 DELAY(20); 5175 /* Disable L1-Active. 
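 * Keeping PCIe L1-Active disabled while the device is in use appears to
 * match what the reference Linux driver does as part of its APM bring-up.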
*/ 5176 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5177 wpi_nic_unlock(sc); 5178 5179 return 0; 5180 } 5181 5182 static void 5183 wpi_apm_stop_master(struct wpi_softc *sc) 5184 { 5185 int ntries; 5186 5187 /* Stop busmaster DMA activity. */ 5188 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5189 5190 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5191 WPI_GP_CNTRL_MAC_PS) 5192 return; /* Already asleep. */ 5193 5194 for (ntries = 0; ntries < 100; ntries++) { 5195 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5196 return; 5197 DELAY(10); 5198 } 5199 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5200 __func__); 5201 } 5202 5203 static void 5204 wpi_apm_stop(struct wpi_softc *sc) 5205 { 5206 wpi_apm_stop_master(sc); 5207 5208 /* Reset the entire device. */ 5209 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5210 DELAY(10); 5211 /* Clear "initialization complete" bit. */ 5212 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5213 } 5214 5215 static void 5216 wpi_nic_config(struct wpi_softc *sc) 5217 { 5218 uint32_t rev; 5219 5220 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5221 5222 /* voodoo from the Linux "driver".. */ 5223 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5224 if ((rev & 0xc0) == 0x40) 5225 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5226 else if (!(rev & 0x80)) 5227 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5228 5229 if (sc->cap == 0x80) 5230 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5231 5232 if ((sc->rev & 0xf0) == 0xd0) 5233 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5234 else 5235 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5236 5237 if (sc->type > 1) 5238 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5239 } 5240 5241 static int 5242 wpi_hw_init(struct wpi_softc *sc) 5243 { 5244 uint8_t chnl; 5245 int ntries, error; 5246 5247 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5248 5249 /* Clear pending interrupts. */ 5250 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5251 5252 if ((error = wpi_apm_init(sc)) != 0) { 5253 device_printf(sc->sc_dev, 5254 "%s: could not power ON adapter, error %d\n", __func__, 5255 error); 5256 return error; 5257 } 5258 5259 /* Select VMAIN power source. */ 5260 if ((error = wpi_nic_lock(sc)) != 0) 5261 return error; 5262 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5263 wpi_nic_unlock(sc); 5264 /* Spin until VMAIN gets selected. */ 5265 for (ntries = 0; ntries < 5000; ntries++) { 5266 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5267 break; 5268 DELAY(10); 5269 } 5270 if (ntries == 5000) { 5271 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5272 return ETIMEDOUT; 5273 } 5274 5275 /* Perform adapter initialization. */ 5276 wpi_nic_config(sc); 5277 5278 /* Initialize RX ring. */ 5279 if ((error = wpi_nic_lock(sc)) != 0) 5280 return error; 5281 /* Set physical address of RX ring. */ 5282 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5283 /* Set physical address of RX read pointer. */ 5284 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5285 offsetof(struct wpi_shared, next)); 5286 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5287 /* Enable RX. 
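 * Going by the flag names: enable the Rx DMA channel, let it fetch RBDs
 * and write back status, allow maximum-sized fragments, set the ring size
 * (log2 of the RBD count) and route the 'Rx done' interrupt to the host
 * with a small coalescing timeout.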
*/ 5288 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5289 WPI_FH_RX_CONFIG_DMA_ENA | 5290 WPI_FH_RX_CONFIG_RDRBD_ENA | 5291 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5292 WPI_FH_RX_CONFIG_MAXFRAG | 5293 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5294 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5295 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5296 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5297 wpi_nic_unlock(sc); 5298 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5299 5300 /* Initialize TX rings. */ 5301 if ((error = wpi_nic_lock(sc)) != 0) 5302 return error; 5303 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5304 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5305 /* Enable all 6 TX rings. */ 5306 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5307 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5308 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5309 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5310 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5311 /* Set physical address of TX rings. */ 5312 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5313 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5314 5315 /* Enable all DMA channels. */ 5316 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5317 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5318 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5319 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5320 } 5321 wpi_nic_unlock(sc); 5322 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5323 5324 /* Clear "radio off" and "commands blocked" bits. */ 5325 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5326 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5327 5328 /* Clear pending interrupts. */ 5329 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5330 /* Enable interrupts. */ 5331 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5332 5333 /* _Really_ make sure "radio off" bit is cleared! */ 5334 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5335 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5336 5337 if ((error = wpi_load_firmware(sc)) != 0) { 5338 device_printf(sc->sc_dev, 5339 "%s: could not load firmware, error %d\n", __func__, 5340 error); 5341 return error; 5342 } 5343 /* Wait at most one second for firmware alive notification. */ 5344 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5345 device_printf(sc->sc_dev, 5346 "%s: timeout waiting for adapter to initialize, error %d\n", 5347 __func__, error); 5348 return error; 5349 } 5350 5351 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5352 5353 /* Do post-firmware initialization. */ 5354 return wpi_post_alive(sc); 5355 } 5356 5357 static void 5358 wpi_hw_stop(struct wpi_softc *sc) 5359 { 5360 uint8_t chnl, qid; 5361 int ntries; 5362 5363 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5364 5365 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5366 wpi_nic_lock(sc); 5367 5368 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5369 5370 /* Disable interrupts. */ 5371 WPI_WRITE(sc, WPI_INT_MASK, 0); 5372 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5373 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5374 5375 /* Make sure we no longer hold the NIC lock. */ 5376 wpi_nic_unlock(sc); 5377 5378 if (wpi_nic_lock(sc) == 0) { 5379 /* Stop TX scheduler. */ 5380 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5381 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5382 5383 /* Stop all DMA channels. 
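 * Clear each channel's Tx config and give it up to ~2ms (200 x 10us) to
 * report idle in WPI_FH_TX_STATUS.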
*/ 5384 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5385 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5386 for (ntries = 0; ntries < 200; ntries++) { 5387 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5388 WPI_FH_TX_STATUS_IDLE(chnl)) 5389 break; 5390 DELAY(10); 5391 } 5392 } 5393 wpi_nic_unlock(sc); 5394 } 5395 5396 /* Stop RX ring. */ 5397 wpi_reset_rx_ring(sc); 5398 5399 /* Reset all TX rings. */ 5400 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) 5401 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5402 5403 if (wpi_nic_lock(sc) == 0) { 5404 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5405 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5406 wpi_nic_unlock(sc); 5407 } 5408 DELAY(5); 5409 /* Power OFF adapter. */ 5410 wpi_apm_stop(sc); 5411 } 5412 5413 static void 5414 wpi_radio_on(void *arg0, int pending) 5415 { 5416 struct wpi_softc *sc = arg0; 5417 struct ieee80211com *ic = &sc->sc_ic; 5418 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5419 5420 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5421 5422 WPI_LOCK(sc); 5423 callout_stop(&sc->watchdog_rfkill); 5424 WPI_UNLOCK(sc); 5425 5426 if (vap != NULL) 5427 ieee80211_init(vap); 5428 } 5429 5430 static void 5431 wpi_radio_off(void *arg0, int pending) 5432 { 5433 struct wpi_softc *sc = arg0; 5434 struct ieee80211com *ic = &sc->sc_ic; 5435 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5436 5437 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5438 5439 ieee80211_notify_radio(ic, 0); 5440 wpi_stop(sc); 5441 if (vap != NULL) 5442 ieee80211_stop(vap); 5443 5444 WPI_LOCK(sc); 5445 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5446 WPI_UNLOCK(sc); 5447 } 5448 5449 static int 5450 wpi_init(struct wpi_softc *sc) 5451 { 5452 int error = 0; 5453 5454 WPI_LOCK(sc); 5455 5456 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5457 5458 if (sc->sc_running != 0) 5459 goto end; 5460 5461 /* Check that the radio is not disabled by hardware switch. */ 5462 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5463 device_printf(sc->sc_dev, 5464 "RF switch: radio disabled (%s)\n", __func__); 5465 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5466 sc); 5467 error = EINPROGRESS; 5468 goto end; 5469 } 5470 5471 /* Read firmware images from the filesystem. */ 5472 if ((error = wpi_read_firmware(sc)) != 0) { 5473 device_printf(sc->sc_dev, 5474 "%s: could not read firmware, error %d\n", __func__, 5475 error); 5476 goto end; 5477 } 5478 5479 sc->sc_running = 1; 5480 5481 /* Initialize hardware and upload firmware. */ 5482 error = wpi_hw_init(sc); 5483 wpi_unload_firmware(sc); 5484 if (error != 0) { 5485 device_printf(sc->sc_dev, 5486 "%s: could not initialize hardware, error %d\n", __func__, 5487 error); 5488 goto fail; 5489 } 5490 5491 /* Configure adapter now that it is ready. 
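 * wpi_config() pushes the initial power-save level, bluetooth coexistence
 * settings, the RXON configuration and the MRR (rate scaling) table to the
 * freshly booted firmware.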
*/
5492 if ((error = wpi_config(sc)) != 0) {
5493 device_printf(sc->sc_dev,
5494 "%s: could not configure device, error %d\n", __func__,
5495 error);
5496 goto fail;
5497 }
5498
5499 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5500
5501 WPI_UNLOCK(sc);
5502
5503 return 0;
5504
5505 fail: wpi_stop_locked(sc);
5506
5507 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
5508 WPI_UNLOCK(sc);
5509
5510 return error;
5511 }
5512
5513 static void
5514 wpi_stop_locked(struct wpi_softc *sc)
5515 {
5516
5517 WPI_LOCK_ASSERT(sc);
5518
5519 if (sc->sc_running == 0)
5520 return;
5521
5522 WPI_TX_LOCK(sc);
5523 WPI_TXQ_LOCK(sc);
5524 sc->sc_running = 0;
5525 WPI_TXQ_UNLOCK(sc);
5526 WPI_TX_UNLOCK(sc);
5527
5528 WPI_TXQ_STATE_LOCK(sc);
5529 callout_stop(&sc->tx_timeout);
5530 WPI_TXQ_STATE_UNLOCK(sc);
5531
5532 WPI_RXON_LOCK(sc);
5533 callout_stop(&sc->scan_timeout);
5534 callout_stop(&sc->calib_to);
5535 WPI_RXON_UNLOCK(sc);
5536
5537 /* Power OFF hardware. */
5538 wpi_hw_stop(sc);
5539 }
5540
5541 static void
5542 wpi_stop(struct wpi_softc *sc)
5543 {
5544 WPI_LOCK(sc);
5545 wpi_stop_locked(sc);
5546 WPI_UNLOCK(sc);
5547 }
5548
5549 /*
5550 * Callback from net80211 to start a scan.
5551 */
5552 static void
5553 wpi_scan_start(struct ieee80211com *ic)
5554 {
5555 struct wpi_softc *sc = ic->ic_softc;
5556
5557 wpi_set_led(sc, WPI_LED_LINK, 20, 2);
5558 }
5559
5560 /*
5561 * Callback from net80211 to terminate a scan.
5562 */
5563 static void
5564 wpi_scan_end(struct ieee80211com *ic)
5565 {
5566 struct wpi_softc *sc = ic->ic_softc;
5567 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5568
5569 if (vap->iv_state == IEEE80211_S_RUN)
5570 wpi_set_led(sc, WPI_LED_LINK, 0, 1);
5571 }
5572
5573 /**
5574 * Called by the net80211 framework to indicate to the driver
5575 * that the channel should be changed.
5576 */
5577 static void
5578 wpi_set_channel(struct ieee80211com *ic)
5579 {
5580 const struct ieee80211_channel *c = ic->ic_curchan;
5581 struct wpi_softc *sc = ic->ic_softc;
5582 int error;
5583
5584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5585
5586 WPI_LOCK(sc);
5587 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
5588 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
5589 WPI_UNLOCK(sc);
5590 WPI_TX_LOCK(sc);
5591 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
5592 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
5593 WPI_TX_UNLOCK(sc);
5594
5595 /*
5596 * Only need to set the channel in Monitor mode. AP scanning and auth
5597 * are already taken care of by their respective firmware commands.
5598 */
5599 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5600 WPI_RXON_LOCK(sc);
5601 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
5602 if (IEEE80211_IS_CHAN_2GHZ(c)) {
5603 sc->rxon.flags |= htole32(WPI_RXON_AUTO |
5604 WPI_RXON_24GHZ);
5605 } else {
5606 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
5607 WPI_RXON_24GHZ);
5608 }
5609 if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
5610 device_printf(sc->sc_dev,
5611 "%s: error %d setting channel\n", __func__,
5612 error);
5613 WPI_RXON_UNLOCK(sc);
5614 }
5615 }
5616
5617 /**
5618 * Called by net80211 to indicate that we need to scan the current
5619 * channel. The channel was previously set via the wpi_set_channel
5620 * callback.
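 * The maxdwell argument is not used here; the dwell times are computed by
 * wpi_scan() itself (see wpi_get_active_dwell_time() and
 * wpi_get_passive_dwell_time()).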
5621 */
5622 static void
5623 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5624 {
5625 struct ieee80211vap *vap = ss->ss_vap;
5626 struct ieee80211com *ic = vap->iv_ic;
5627 struct wpi_softc *sc = ic->ic_softc;
5628 int error;
5629
5630 WPI_RXON_LOCK(sc);
5631 error = wpi_scan(sc, ic->ic_curchan);
5632 WPI_RXON_UNLOCK(sc);
5633 if (error != 0)
5634 ieee80211_cancel_scan(vap);
5635 }
5636
5637 /**
5638 * Called by the net80211 framework to indicate that the minimum dwell time
5639 * has been met and the scan should be terminated.
5640 * We don't actually terminate the scan, as the firmware will notify
5641 * us when it's finished and we have no way to interrupt it.
5642 */
5643 static void
5644 wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5645 {
5646 /* NB: don't try to abort scan; wait for firmware to finish */
5647 }
5648