/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware as many
 * other adapters do. Instead, at run time the eeprom is set into a known
 * state and told to load boot firmware. The boot firmware loads an init
 * and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and hardware communicate with
 * the firmware by way of circular DMA rings placed in SRAM.
 *
 * There are 6 memory rings: 1 command ring, 1 rx data ring and 4 tx data
 * rings. The 4 tx data rings allow for QoS prioritization.
 *
 * The rx data ring consists of 32 DMA buffers. Two registers are used to
 * indicate where in the ring the driver and the firmware are up to. The
 * driver sets the initial read index (reg1) and the initial write index
 * (reg2); the firmware updates the read index (reg1) on rx of a packet
 * and fires an interrupt. The driver then processes the buffers starting
 * at reg1, indicating to the firmware which buffers have been consumed by
 * updating reg2, and at the same time allocates new memory for each
 * processed buffer.
 *
 * A similar thing happens with the tx rings. The difference is that the
 * firmware stops processing buffers once the queue is full and resumes
 * only after confirmation of a successful transmission (tx_done) has
 * occurred.
 *
 * The command ring operates in the same manner as the tx queues.
 *
 * All communication directly with the card (e.g. eeprom access) is
 * classed as Stage1 communication.
 *
 * All communication via the firmware to the card is classed as Stage2.
 * The firmware consists of 2 parts: a bootstrap firmware and a runtime
 * firmware. The bootstrap firmware and runtime firmware are loaded from
 * host memory via DMA to the card and then told to execute. From this
 * point on the majority of communication between the driver and the card
 * goes via the firmware.
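 *
 * As a rough sketch of the rx handshake described above (the relevant
 * code lives in wpi_rx_done() and wpi_update_rx_ring() below), the
 * driver side reduces to:
 *
 *	process the buffer at rxq.cur and give it a fresh mbuf;
 *	advance rxq.cur;
 *	hand the new write index back to the firmware, rounded down to a
 *	multiple of 8:
 *		WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7);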
58 */ 59 60 #include "opt_wlan.h" 61 #include "opt_wpi.h" 62 63 #include <sys/param.h> 64 #include <sys/sysctl.h> 65 #include <sys/sockio.h> 66 #include <sys/mbuf.h> 67 #include <sys/kernel.h> 68 #include <sys/socket.h> 69 #include <sys/systm.h> 70 #include <sys/malloc.h> 71 #include <sys/queue.h> 72 #include <sys/taskqueue.h> 73 #include <sys/module.h> 74 #include <sys/bus.h> 75 #include <sys/endian.h> 76 #include <sys/linker.h> 77 #include <sys/firmware.h> 78 79 #include <machine/bus.h> 80 #include <machine/resource.h> 81 #include <sys/rman.h> 82 83 #include <dev/pci/pcireg.h> 84 #include <dev/pci/pcivar.h> 85 86 #include <net/bpf.h> 87 #include <net/if.h> 88 #include <net/if_var.h> 89 #include <net/if_arp.h> 90 #include <net/ethernet.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/if_types.h> 94 95 #include <netinet/in.h> 96 #include <netinet/in_systm.h> 97 #include <netinet/in_var.h> 98 #include <netinet/if_ether.h> 99 #include <netinet/ip.h> 100 101 #include <net80211/ieee80211_var.h> 102 #include <net80211/ieee80211_radiotap.h> 103 #include <net80211/ieee80211_regdomain.h> 104 #include <net80211/ieee80211_ratectl.h> 105 106 #include <dev/wpi/if_wpireg.h> 107 #include <dev/wpi/if_wpivar.h> 108 #include <dev/wpi/if_wpi_debug.h> 109 110 struct wpi_ident { 111 uint16_t vendor; 112 uint16_t device; 113 uint16_t subdevice; 114 const char *name; 115 }; 116 117 static const struct wpi_ident wpi_ident_table[] = { 118 /* The below entries support ABG regardless of the subid */ 119 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 120 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 /* The below entries only support BG */ 122 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 123 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0, 0, 0, NULL } 127 }; 128 129 static int wpi_probe(device_t); 130 static int wpi_attach(device_t); 131 static void wpi_radiotap_attach(struct wpi_softc *); 132 static void wpi_sysctlattach(struct wpi_softc *); 133 static void wpi_init_beacon(struct wpi_vap *); 134 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 135 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 136 const uint8_t [IEEE80211_ADDR_LEN], 137 const uint8_t [IEEE80211_ADDR_LEN]); 138 static void wpi_vap_delete(struct ieee80211vap *); 139 static int wpi_detach(device_t); 140 static int wpi_shutdown(device_t); 141 static int wpi_suspend(device_t); 142 static int wpi_resume(device_t); 143 static int wpi_nic_lock(struct wpi_softc *); 144 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 145 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 146 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 147 void **, bus_size_t, bus_size_t); 148 static void wpi_dma_contig_free(struct wpi_dma_info *); 149 static int wpi_alloc_shared(struct wpi_softc *); 150 static void wpi_free_shared(struct wpi_softc *); 151 static int wpi_alloc_fwmem(struct wpi_softc *); 152 static void wpi_free_fwmem(struct wpi_softc *); 153 static int wpi_alloc_rx_ring(struct wpi_softc *); 154 static void wpi_update_rx_ring(struct wpi_softc *); 155 static void wpi_update_rx_ring_ps(struct wpi_softc *); 156 static void wpi_reset_rx_ring(struct wpi_softc *); 157 static void wpi_free_rx_ring(struct wpi_softc *); 158 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 159 int); 160 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_update_tx_ring_ps(struct wpi_softc *, 162 struct wpi_tx_ring *); 163 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 164 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static int wpi_read_eeprom(struct wpi_softc *, 166 uint8_t macaddr[IEEE80211_ADDR_LEN]); 167 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 168 static void wpi_read_eeprom_band(struct wpi_softc *, int); 169 static int wpi_read_eeprom_channels(struct wpi_softc *, int); 170 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 171 struct ieee80211_channel *); 172 static int wpi_setregdomain(struct ieee80211com *, 173 struct ieee80211_regdomain *, int, 174 struct ieee80211_channel[]); 175 static int wpi_read_eeprom_group(struct wpi_softc *, int); 176 static int wpi_add_node_entry_adhoc(struct wpi_softc *); 177 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 178 const uint8_t mac[IEEE80211_ADDR_LEN]); 179 static void wpi_node_free(struct ieee80211_node *); 180 static void wpi_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, int, 181 int); 182 static void wpi_restore_node(void *, struct ieee80211_node *); 183 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 184 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 185 static void wpi_calib_timeout(void *); 186 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 187 struct wpi_rx_data *); 188 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 189 struct wpi_rx_data *); 190 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 191 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 192 static void wpi_notif_intr(struct wpi_softc *); 193 static void wpi_wakeup_intr(struct wpi_softc *); 194 #ifdef WPI_DEBUG 195 static void wpi_debug_registers(struct wpi_softc *); 196 #endif 197 static void wpi_fatal_intr(struct wpi_softc *); 198 static void wpi_intr(void *); 199 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 200 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 201 struct ieee80211_node *); 202 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 203 struct ieee80211_node *, 204 const struct ieee80211_bpf_params *); 205 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 206 const struct ieee80211_bpf_params *); 207 static void wpi_start(struct ifnet *); 208 static void wpi_start_task(void *, int); 209 static void wpi_watchdog_rfkill(void *); 210 static void wpi_scan_timeout(void *); 211 static void wpi_tx_timeout(void *); 212 static int wpi_ioctl(struct ifnet *, u_long, caddr_t); 213 static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 214 static int wpi_mrr_setup(struct wpi_softc *); 215 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 216 static int wpi_add_broadcast_node(struct wpi_softc *, int); 217 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 218 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 219 static int wpi_updateedca(struct ieee80211com *); 220 static void wpi_set_promisc(struct wpi_softc *); 221 static void wpi_update_promisc(struct ifnet *); 222 static void wpi_update_mcast(struct ifnet *); 223 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 224 static int 
wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 225 static void wpi_power_calibration(struct wpi_softc *); 226 static int wpi_set_txpower(struct wpi_softc *, int); 227 static int wpi_get_power_index(struct wpi_softc *, 228 struct wpi_power_group *, uint8_t, int, int); 229 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 230 static int wpi_send_btcoex(struct wpi_softc *); 231 static int wpi_send_rxon(struct wpi_softc *, int, int); 232 static int wpi_config(struct wpi_softc *); 233 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 234 struct ieee80211_channel *, uint8_t); 235 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 236 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 237 struct ieee80211_channel *); 238 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 239 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 240 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 241 static int wpi_config_beacon(struct wpi_vap *); 242 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 243 static void wpi_update_beacon(struct ieee80211vap *, int); 244 static void wpi_newassoc(struct ieee80211_node *, int); 245 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 246 static int wpi_load_key(struct ieee80211_node *, 247 const struct ieee80211_key *); 248 static void wpi_load_key_cb(void *, struct ieee80211_node *); 249 static int wpi_set_global_keys(struct ieee80211_node *); 250 static int wpi_del_key(struct ieee80211_node *, 251 const struct ieee80211_key *); 252 static void wpi_del_key_cb(void *, struct ieee80211_node *); 253 static int wpi_process_key(struct ieee80211vap *, 254 const struct ieee80211_key *, int); 255 static int wpi_key_set(struct ieee80211vap *, 256 const struct ieee80211_key *, 257 const uint8_t mac[IEEE80211_ADDR_LEN]); 258 static int wpi_key_delete(struct ieee80211vap *, 259 const struct ieee80211_key *); 260 static int wpi_post_alive(struct wpi_softc *); 261 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 262 static int wpi_load_firmware(struct wpi_softc *); 263 static int wpi_read_firmware(struct wpi_softc *); 264 static void wpi_unload_firmware(struct wpi_softc *); 265 static int wpi_clock_wait(struct wpi_softc *); 266 static int wpi_apm_init(struct wpi_softc *); 267 static void wpi_apm_stop_master(struct wpi_softc *); 268 static void wpi_apm_stop(struct wpi_softc *); 269 static void wpi_nic_config(struct wpi_softc *); 270 static int wpi_hw_init(struct wpi_softc *); 271 static void wpi_hw_stop(struct wpi_softc *); 272 static void wpi_radio_on(void *, int); 273 static void wpi_radio_off(void *, int); 274 static void wpi_init(void *); 275 static void wpi_stop_locked(struct wpi_softc *); 276 static void wpi_stop(struct wpi_softc *); 277 static void wpi_scan_start(struct ieee80211com *); 278 static void wpi_scan_end(struct ieee80211com *); 279 static void wpi_set_channel(struct ieee80211com *); 280 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 281 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 282 static void wpi_hw_reset(void *, int); 283 284 static device_method_t wpi_methods[] = { 285 /* Device interface */ 286 DEVMETHOD(device_probe, wpi_probe), 287 DEVMETHOD(device_attach, wpi_attach), 288 DEVMETHOD(device_detach, wpi_detach), 289 DEVMETHOD(device_shutdown, wpi_shutdown), 290 DEVMETHOD(device_suspend, wpi_suspend), 291 DEVMETHOD(device_resume, wpi_resume), 292 293 
DEVMETHOD_END 294 }; 295 296 static driver_t wpi_driver = { 297 "wpi", 298 wpi_methods, 299 sizeof (struct wpi_softc) 300 }; 301 static devclass_t wpi_devclass; 302 303 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 304 305 MODULE_VERSION(wpi, 1); 306 307 MODULE_DEPEND(wpi, pci, 1, 1, 1); 308 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 309 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 310 311 static int 312 wpi_probe(device_t dev) 313 { 314 const struct wpi_ident *ident; 315 316 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 317 if (pci_get_vendor(dev) == ident->vendor && 318 pci_get_device(dev) == ident->device) { 319 device_set_desc(dev, ident->name); 320 return (BUS_PROBE_DEFAULT); 321 } 322 } 323 return ENXIO; 324 } 325 326 static int 327 wpi_attach(device_t dev) 328 { 329 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 330 struct ieee80211com *ic; 331 struct ifnet *ifp; 332 int i, error, rid; 333 #ifdef WPI_DEBUG 334 int supportsa = 1; 335 const struct wpi_ident *ident; 336 #endif 337 uint8_t macaddr[IEEE80211_ADDR_LEN]; 338 339 sc->sc_dev = dev; 340 341 #ifdef WPI_DEBUG 342 error = resource_int_value(device_get_name(sc->sc_dev), 343 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 344 if (error != 0) 345 sc->sc_debug = 0; 346 #else 347 sc->sc_debug = 0; 348 #endif 349 350 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 351 352 /* 353 * Get the offset of the PCI Express Capability Structure in PCI 354 * Configuration Space. 355 */ 356 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 357 if (error != 0) { 358 device_printf(dev, "PCIe capability structure not found!\n"); 359 return error; 360 } 361 362 /* 363 * Some card's only support 802.11b/g not a, check to see if 364 * this is one such card. A 0x0 in the subdevice table indicates 365 * the entire subdevice range is to be ignored. 366 */ 367 #ifdef WPI_DEBUG 368 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 369 if (ident->subdevice && 370 pci_get_subdevice(dev) == ident->subdevice) { 371 supportsa = 0; 372 break; 373 } 374 } 375 #endif 376 377 /* Clear device-specific "PCI retry timeout" register (41h). */ 378 pci_write_config(dev, 0x41, 0, 1); 379 380 /* Enable bus-mastering. */ 381 pci_enable_busmaster(dev); 382 383 rid = PCIR_BAR(0); 384 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 385 RF_ACTIVE); 386 if (sc->mem == NULL) { 387 device_printf(dev, "can't map mem space\n"); 388 return ENOMEM; 389 } 390 sc->sc_st = rman_get_bustag(sc->mem); 391 sc->sc_sh = rman_get_bushandle(sc->mem); 392 393 i = 1; 394 rid = 0; 395 if (pci_alloc_msi(dev, &i) == 0) 396 rid = 1; 397 /* Install interrupt handler. */ 398 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 399 (rid != 0 ? 0 : RF_SHAREABLE)); 400 if (sc->irq == NULL) { 401 device_printf(dev, "can't map interrupt\n"); 402 error = ENOMEM; 403 goto fail; 404 } 405 406 WPI_LOCK_INIT(sc); 407 WPI_TX_LOCK_INIT(sc); 408 WPI_RXON_LOCK_INIT(sc); 409 WPI_NT_LOCK_INIT(sc); 410 WPI_TXQ_LOCK_INIT(sc); 411 WPI_TXQ_STATE_LOCK_INIT(sc); 412 413 /* Allocate DMA memory for firmware transfers. */ 414 if ((error = wpi_alloc_fwmem(sc)) != 0) { 415 device_printf(dev, 416 "could not allocate memory for firmware, error %d\n", 417 error); 418 goto fail; 419 } 420 421 /* Allocate shared page. */ 422 if ((error = wpi_alloc_shared(sc)) != 0) { 423 device_printf(dev, "could not allocate shared page\n"); 424 goto fail; 425 } 426 427 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. 
*/ 428 for (i = 0; i < WPI_NTXQUEUES; i++) { 429 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 430 device_printf(dev, 431 "could not allocate TX ring %d, error %d\n", i, 432 error); 433 goto fail; 434 } 435 } 436 437 /* Allocate RX ring. */ 438 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 439 device_printf(dev, "could not allocate RX ring, error %d\n", 440 error); 441 goto fail; 442 } 443 444 /* Clear pending interrupts. */ 445 WPI_WRITE(sc, WPI_INT, 0xffffffff); 446 447 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 448 if (ifp == NULL) { 449 device_printf(dev, "can not allocate ifnet structure\n"); 450 goto fail; 451 } 452 453 ic = ifp->if_l2com; 454 ic->ic_ifp = ifp; 455 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 456 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 457 458 /* Set device capabilities. */ 459 ic->ic_caps = 460 IEEE80211_C_STA /* station mode supported */ 461 | IEEE80211_C_IBSS /* IBSS mode supported */ 462 | IEEE80211_C_HOSTAP /* Host access point mode */ 463 | IEEE80211_C_MONITOR /* monitor mode supported */ 464 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 465 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 466 | IEEE80211_C_TXPMGT /* tx power management */ 467 | IEEE80211_C_SHSLOT /* short slot time supported */ 468 | IEEE80211_C_WPA /* 802.11i */ 469 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 470 | IEEE80211_C_WME /* 802.11e */ 471 | IEEE80211_C_PMGT /* Station-side power mgmt */ 472 ; 473 474 ic->ic_cryptocaps = 475 IEEE80211_CRYPTO_AES_CCM; 476 477 /* 478 * Read in the eeprom and also setup the channels for 479 * net80211. We don't set the rates as net80211 does this for us 480 */ 481 if ((error = wpi_read_eeprom(sc, macaddr)) != 0) { 482 device_printf(dev, "could not read EEPROM, error %d\n", 483 error); 484 goto fail; 485 } 486 487 #ifdef WPI_DEBUG 488 if (bootverbose) { 489 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 490 sc->domain); 491 device_printf(sc->sc_dev, "Hardware Type: %c\n", 492 sc->type > 1 ? 'B': '?'); 493 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 494 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 495 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 496 supportsa ? "does" : "does not"); 497 498 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 499 check what sc->rev really represents - benjsc 20070615 */ 500 } 501 #endif 502 503 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 504 ifp->if_softc = sc; 505 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 506 ifp->if_init = wpi_init; 507 ifp->if_ioctl = wpi_ioctl; 508 ifp->if_start = wpi_start; 509 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 510 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 511 IFQ_SET_READY(&ifp->if_snd); 512 513 ieee80211_ifattach(ic, macaddr); 514 ic->ic_vap_create = wpi_vap_create; 515 ic->ic_vap_delete = wpi_vap_delete; 516 ic->ic_raw_xmit = wpi_raw_xmit; 517 ic->ic_node_alloc = wpi_node_alloc; 518 sc->sc_node_free = ic->ic_node_free; 519 ic->ic_node_free = wpi_node_free; 520 ic->ic_wme.wme_update = wpi_updateedca; 521 ic->ic_update_promisc = wpi_update_promisc; 522 ic->ic_update_mcast = wpi_update_mcast; 523 ic->ic_newassoc = wpi_newassoc; 524 ic->ic_scan_start = wpi_scan_start; 525 ic->ic_scan_end = wpi_scan_end; 526 ic->ic_set_channel = wpi_set_channel; 527 ic->ic_scan_curchan = wpi_scan_curchan; 528 ic->ic_scan_mindwell = wpi_scan_mindwell; 529 ic->ic_setregdomain = wpi_setregdomain; 530 531 sc->sc_update_rx_ring = wpi_update_rx_ring; 532 sc->sc_update_tx_ring = wpi_update_tx_ring; 533 534 wpi_radiotap_attach(sc); 535 536 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 537 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 538 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 539 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 540 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 541 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 542 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 543 TASK_INIT(&sc->sc_start_task, 0, wpi_start_task, sc); 544 545 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 546 taskqueue_thread_enqueue, &sc->sc_tq); 547 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 548 if (error != 0) { 549 device_printf(dev, "can't start threads, error %d\n", error); 550 goto fail; 551 } 552 553 wpi_sysctlattach(sc); 554 555 /* 556 * Hook our interrupt after all initialization is complete. 557 */ 558 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 559 NULL, wpi_intr, sc, &sc->sc_ih); 560 if (error != 0) { 561 device_printf(dev, "can't establish interrupt, error %d\n", 562 error); 563 goto fail; 564 } 565 566 if (bootverbose) 567 ieee80211_announce(ic); 568 569 #ifdef WPI_DEBUG 570 if (sc->sc_debug & WPI_DEBUG_HW) 571 ieee80211_announce_channels(ic); 572 #endif 573 574 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 575 return 0; 576 577 fail: wpi_detach(dev); 578 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 579 return error; 580 } 581 582 /* 583 * Attach the interface to 802.11 radiotap. 
584 */ 585 static void 586 wpi_radiotap_attach(struct wpi_softc *sc) 587 { 588 struct ifnet *ifp = sc->sc_ifp; 589 struct ieee80211com *ic = ifp->if_l2com; 590 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 591 ieee80211_radiotap_attach(ic, 592 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 593 WPI_TX_RADIOTAP_PRESENT, 594 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 595 WPI_RX_RADIOTAP_PRESENT); 596 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 597 } 598 599 static void 600 wpi_sysctlattach(struct wpi_softc *sc) 601 { 602 #ifdef WPI_DEBUG 603 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 604 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 605 606 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 607 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 608 "control debugging printfs"); 609 #endif 610 } 611 612 static void 613 wpi_init_beacon(struct wpi_vap *wvp) 614 { 615 struct wpi_buf *bcn = &wvp->wv_bcbuf; 616 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 617 618 cmd->id = WPI_ID_BROADCAST; 619 cmd->ofdm_mask = 0xff; 620 cmd->cck_mask = 0x0f; 621 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 622 623 /* 624 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 625 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 626 */ 627 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 628 629 bcn->code = WPI_CMD_SET_BEACON; 630 bcn->ac = WPI_CMD_QUEUE_NUM; 631 bcn->size = sizeof(struct wpi_cmd_beacon); 632 } 633 634 static struct ieee80211vap * 635 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 636 enum ieee80211_opmode opmode, int flags, 637 const uint8_t bssid[IEEE80211_ADDR_LEN], 638 const uint8_t mac[IEEE80211_ADDR_LEN]) 639 { 640 struct wpi_vap *wvp; 641 struct ieee80211vap *vap; 642 643 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 644 return NULL; 645 646 wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap), 647 M_80211_VAP, M_NOWAIT | M_ZERO); 648 if (wvp == NULL) 649 return NULL; 650 vap = &wvp->wv_vap; 651 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); 652 653 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 654 WPI_VAP_LOCK_INIT(wvp); 655 wpi_init_beacon(wvp); 656 } 657 658 /* Override with driver methods. */ 659 vap->iv_key_set = wpi_key_set; 660 vap->iv_key_delete = wpi_key_delete; 661 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 662 vap->iv_recv_mgmt = wpi_recv_mgmt; 663 wvp->wv_newstate = vap->iv_newstate; 664 vap->iv_newstate = wpi_newstate; 665 vap->iv_update_beacon = wpi_update_beacon; 666 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 667 668 ieee80211_ratectl_init(vap); 669 /* Complete setup. 
*/ 670 ieee80211_vap_attach(vap, ieee80211_media_change, 671 ieee80211_media_status); 672 ic->ic_opmode = opmode; 673 return vap; 674 } 675 676 static void 677 wpi_vap_delete(struct ieee80211vap *vap) 678 { 679 struct wpi_vap *wvp = WPI_VAP(vap); 680 struct wpi_buf *bcn = &wvp->wv_bcbuf; 681 enum ieee80211_opmode opmode = vap->iv_opmode; 682 683 ieee80211_ratectl_deinit(vap); 684 ieee80211_vap_detach(vap); 685 686 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 687 if (bcn->m != NULL) 688 m_freem(bcn->m); 689 690 WPI_VAP_LOCK_DESTROY(wvp); 691 } 692 693 free(wvp, M_80211_VAP); 694 } 695 696 static int 697 wpi_detach(device_t dev) 698 { 699 struct wpi_softc *sc = device_get_softc(dev); 700 struct ifnet *ifp = sc->sc_ifp; 701 struct ieee80211com *ic; 702 int qid; 703 704 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 705 706 if (ifp != NULL) { 707 ic = ifp->if_l2com; 708 709 ieee80211_draintask(ic, &sc->sc_radioon_task); 710 ieee80211_draintask(ic, &sc->sc_start_task); 711 712 wpi_stop(sc); 713 714 taskqueue_drain_all(sc->sc_tq); 715 taskqueue_free(sc->sc_tq); 716 717 callout_drain(&sc->watchdog_rfkill); 718 callout_drain(&sc->tx_timeout); 719 callout_drain(&sc->scan_timeout); 720 callout_drain(&sc->calib_to); 721 ieee80211_ifdetach(ic); 722 } 723 724 /* Uninstall interrupt handler. */ 725 if (sc->irq != NULL) { 726 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 727 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 728 sc->irq); 729 pci_release_msi(dev); 730 } 731 732 if (sc->txq[0].data_dmat) { 733 /* Free DMA resources. */ 734 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 735 wpi_free_tx_ring(sc, &sc->txq[qid]); 736 737 wpi_free_rx_ring(sc); 738 wpi_free_shared(sc); 739 } 740 741 if (sc->fw_dma.tag) 742 wpi_free_fwmem(sc); 743 744 if (sc->mem != NULL) 745 bus_release_resource(dev, SYS_RES_MEMORY, 746 rman_get_rid(sc->mem), sc->mem); 747 748 if (ifp != NULL) 749 if_free(ifp); 750 751 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 752 WPI_TXQ_STATE_LOCK_DESTROY(sc); 753 WPI_TXQ_LOCK_DESTROY(sc); 754 WPI_NT_LOCK_DESTROY(sc); 755 WPI_RXON_LOCK_DESTROY(sc); 756 WPI_TX_LOCK_DESTROY(sc); 757 WPI_LOCK_DESTROY(sc); 758 return 0; 759 } 760 761 static int 762 wpi_shutdown(device_t dev) 763 { 764 struct wpi_softc *sc = device_get_softc(dev); 765 766 wpi_stop(sc); 767 return 0; 768 } 769 770 static int 771 wpi_suspend(device_t dev) 772 { 773 struct wpi_softc *sc = device_get_softc(dev); 774 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 775 776 ieee80211_suspend_all(ic); 777 return 0; 778 } 779 780 static int 781 wpi_resume(device_t dev) 782 { 783 struct wpi_softc *sc = device_get_softc(dev); 784 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 785 786 /* Clear device-specific "PCI retry timeout" register (41h). */ 787 pci_write_config(dev, 0x41, 0, 1); 788 789 ieee80211_resume_all(ic); 790 return 0; 791 } 792 793 /* 794 * Grab exclusive access to NIC memory. 795 */ 796 static int 797 wpi_nic_lock(struct wpi_softc *sc) 798 { 799 int ntries; 800 801 /* Request exclusive access to NIC. */ 802 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 803 804 /* Spin until we actually get the lock. */ 805 for (ntries = 0; ntries < 1000; ntries++) { 806 if ((WPI_READ(sc, WPI_GP_CNTRL) & 807 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 808 WPI_GP_CNTRL_MAC_ACCESS_ENA) 809 return 0; 810 DELAY(10); 811 } 812 813 device_printf(sc->sc_dev, "could not lock memory\n"); 814 815 return ETIMEDOUT; 816 } 817 818 /* 819 * Release lock on NIC memory. 
820 */ 821 static __inline void 822 wpi_nic_unlock(struct wpi_softc *sc) 823 { 824 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 825 } 826 827 static __inline uint32_t 828 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 829 { 830 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 831 WPI_BARRIER_READ_WRITE(sc); 832 return WPI_READ(sc, WPI_PRPH_RDATA); 833 } 834 835 static __inline void 836 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 837 { 838 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 839 WPI_BARRIER_WRITE(sc); 840 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 841 } 842 843 static __inline void 844 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 845 { 846 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 847 } 848 849 static __inline void 850 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 851 { 852 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 853 } 854 855 static __inline void 856 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 857 const uint32_t *data, int count) 858 { 859 for (; count > 0; count--, data++, addr += 4) 860 wpi_prph_write(sc, addr, *data); 861 } 862 863 static __inline uint32_t 864 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 865 { 866 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 867 WPI_BARRIER_READ_WRITE(sc); 868 return WPI_READ(sc, WPI_MEM_RDATA); 869 } 870 871 static __inline void 872 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 873 int count) 874 { 875 for (; count > 0; count--, addr += 4) 876 *data++ = wpi_mem_read(sc, addr); 877 } 878 879 static int 880 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 881 { 882 uint8_t *out = data; 883 uint32_t val; 884 int error, ntries; 885 886 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 887 888 if ((error = wpi_nic_lock(sc)) != 0) 889 return error; 890 891 for (; count > 0; count -= 2, addr++) { 892 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 893 for (ntries = 0; ntries < 10; ntries++) { 894 val = WPI_READ(sc, WPI_EEPROM); 895 if (val & WPI_EEPROM_READ_VALID) 896 break; 897 DELAY(5); 898 } 899 if (ntries == 10) { 900 device_printf(sc->sc_dev, 901 "timeout reading ROM at 0x%x\n", addr); 902 return ETIMEDOUT; 903 } 904 *out++= val >> 16; 905 if (count > 1) 906 *out ++= val >> 24; 907 } 908 909 wpi_nic_unlock(sc); 910 911 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 912 913 return 0; 914 } 915 916 static void 917 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 918 { 919 if (error != 0) 920 return; 921 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 922 *(bus_addr_t *)arg = segs[0].ds_addr; 923 } 924 925 /* 926 * Allocates a contiguous block of dma memory of the requested size and 927 * alignment. 
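 * The allocation is the usual bus_dma_tag_create() / bus_dmamem_alloc() /
 * bus_dmamap_load() sequence; on any failure the partially constructed
 * state is torn down again via wpi_dma_contig_free().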
928 */ 929 static int 930 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 931 void **kvap, bus_size_t size, bus_size_t alignment) 932 { 933 int error; 934 935 dma->tag = NULL; 936 dma->size = size; 937 938 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 939 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 940 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 941 if (error != 0) 942 goto fail; 943 944 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 945 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 946 if (error != 0) 947 goto fail; 948 949 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 950 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 951 if (error != 0) 952 goto fail; 953 954 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 955 956 if (kvap != NULL) 957 *kvap = dma->vaddr; 958 959 return 0; 960 961 fail: wpi_dma_contig_free(dma); 962 return error; 963 } 964 965 static void 966 wpi_dma_contig_free(struct wpi_dma_info *dma) 967 { 968 if (dma->vaddr != NULL) { 969 bus_dmamap_sync(dma->tag, dma->map, 970 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 971 bus_dmamap_unload(dma->tag, dma->map); 972 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 973 dma->vaddr = NULL; 974 } 975 if (dma->tag != NULL) { 976 bus_dma_tag_destroy(dma->tag); 977 dma->tag = NULL; 978 } 979 } 980 981 /* 982 * Allocate a shared page between host and NIC. 983 */ 984 static int 985 wpi_alloc_shared(struct wpi_softc *sc) 986 { 987 /* Shared buffer must be aligned on a 4KB boundary. */ 988 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 989 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 990 } 991 992 static void 993 wpi_free_shared(struct wpi_softc *sc) 994 { 995 wpi_dma_contig_free(&sc->shared_dma); 996 } 997 998 /* 999 * Allocate DMA-safe memory for firmware transfer. 1000 */ 1001 static int 1002 wpi_alloc_fwmem(struct wpi_softc *sc) 1003 { 1004 /* Must be aligned on a 16-byte boundary. */ 1005 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 1006 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 1007 } 1008 1009 static void 1010 wpi_free_fwmem(struct wpi_softc *sc) 1011 { 1012 wpi_dma_contig_free(&sc->fw_dma); 1013 } 1014 1015 static int 1016 wpi_alloc_rx_ring(struct wpi_softc *sc) 1017 { 1018 struct wpi_rx_ring *ring = &sc->rxq; 1019 bus_size_t size; 1020 int i, error; 1021 1022 ring->cur = 0; 1023 ring->update = 0; 1024 1025 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1026 1027 /* Allocate RX descriptors (16KB aligned.) */ 1028 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1029 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1030 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1031 if (error != 0) { 1032 device_printf(sc->sc_dev, 1033 "%s: could not allocate RX ring DMA memory, error %d\n", 1034 __func__, error); 1035 goto fail; 1036 } 1037 1038 /* Create RX buffer DMA tag. */ 1039 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1040 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1041 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1042 &ring->data_dmat); 1043 if (error != 0) { 1044 device_printf(sc->sc_dev, 1045 "%s: could not create RX buf DMA tag, error %d\n", 1046 __func__, error); 1047 goto fail; 1048 } 1049 1050 /* 1051 * Allocate and map RX buffers. 
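 * Each of the WPI_RX_RING_COUNT slots gets its own DMA map and a
 * page-sized (MJUMPAGESIZE) mbuf cluster; the bus address of every
 * cluster is written into the matching slot of the descriptor ring.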
1052 */ 1053 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1054 struct wpi_rx_data *data = &ring->data[i]; 1055 bus_addr_t paddr; 1056 1057 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1058 if (error != 0) { 1059 device_printf(sc->sc_dev, 1060 "%s: could not create RX buf DMA map, error %d\n", 1061 __func__, error); 1062 goto fail; 1063 } 1064 1065 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1066 if (data->m == NULL) { 1067 device_printf(sc->sc_dev, 1068 "%s: could not allocate RX mbuf\n", __func__); 1069 error = ENOBUFS; 1070 goto fail; 1071 } 1072 1073 error = bus_dmamap_load(ring->data_dmat, data->map, 1074 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1075 &paddr, BUS_DMA_NOWAIT); 1076 if (error != 0 && error != EFBIG) { 1077 device_printf(sc->sc_dev, 1078 "%s: can't map mbuf (error %d)\n", __func__, 1079 error); 1080 goto fail; 1081 } 1082 1083 /* Set physical address of RX buffer. */ 1084 ring->desc[i] = htole32(paddr); 1085 } 1086 1087 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1088 BUS_DMASYNC_PREWRITE); 1089 1090 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1091 1092 return 0; 1093 1094 fail: wpi_free_rx_ring(sc); 1095 1096 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1097 1098 return error; 1099 } 1100 1101 static void 1102 wpi_update_rx_ring(struct wpi_softc *sc) 1103 { 1104 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1105 } 1106 1107 static void 1108 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1109 { 1110 struct wpi_rx_ring *ring = &sc->rxq; 1111 1112 if (ring->update != 0) { 1113 /* Wait for INT_WAKEUP event. */ 1114 return; 1115 } 1116 1117 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1118 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1119 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1120 __func__); 1121 ring->update = 1; 1122 } else { 1123 wpi_update_rx_ring(sc); 1124 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1125 } 1126 } 1127 1128 static void 1129 wpi_reset_rx_ring(struct wpi_softc *sc) 1130 { 1131 struct wpi_rx_ring *ring = &sc->rxq; 1132 int ntries; 1133 1134 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1135 1136 if (wpi_nic_lock(sc) == 0) { 1137 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1138 for (ntries = 0; ntries < 1000; ntries++) { 1139 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1140 WPI_FH_RX_STATUS_IDLE) 1141 break; 1142 DELAY(10); 1143 } 1144 wpi_nic_unlock(sc); 1145 } 1146 1147 ring->cur = 0; 1148 ring->update = 0; 1149 } 1150 1151 static void 1152 wpi_free_rx_ring(struct wpi_softc *sc) 1153 { 1154 struct wpi_rx_ring *ring = &sc->rxq; 1155 int i; 1156 1157 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1158 1159 wpi_dma_contig_free(&ring->desc_dma); 1160 1161 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1162 struct wpi_rx_data *data = &ring->data[i]; 1163 1164 if (data->m != NULL) { 1165 bus_dmamap_sync(ring->data_dmat, data->map, 1166 BUS_DMASYNC_POSTREAD); 1167 bus_dmamap_unload(ring->data_dmat, data->map); 1168 m_freem(data->m); 1169 data->m = NULL; 1170 } 1171 if (data->map != NULL) 1172 bus_dmamap_destroy(ring->data_dmat, data->map); 1173 } 1174 if (ring->data_dmat != NULL) { 1175 bus_dma_tag_destroy(ring->data_dmat); 1176 ring->data_dmat = NULL; 1177 } 1178 } 1179 1180 static int 1181 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1182 { 1183 bus_addr_t paddr; 1184 bus_size_t size; 1185 int i, error; 1186 1187 ring->qid = qid; 1188 ring->queued = 0; 1189 ring->cur = 0; 1190 ring->update = 0; 
1191 1192 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1193 1194 /* Allocate TX descriptors (16KB aligned.) */ 1195 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1196 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1197 size, WPI_RING_DMA_ALIGN); 1198 if (error != 0) { 1199 device_printf(sc->sc_dev, 1200 "%s: could not allocate TX ring DMA memory, error %d\n", 1201 __func__, error); 1202 goto fail; 1203 } 1204 1205 /* Update shared area with ring physical address. */ 1206 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1207 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1208 BUS_DMASYNC_PREWRITE); 1209 1210 /* 1211 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1212 * to allocate commands space for other rings. 1213 * XXX Do we really need to allocate descriptors for other rings? 1214 */ 1215 if (qid > WPI_CMD_QUEUE_NUM) { 1216 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1217 return 0; 1218 } 1219 1220 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1221 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1222 size, 4); 1223 if (error != 0) { 1224 device_printf(sc->sc_dev, 1225 "%s: could not allocate TX cmd DMA memory, error %d\n", 1226 __func__, error); 1227 goto fail; 1228 } 1229 1230 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1231 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1232 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1233 &ring->data_dmat); 1234 if (error != 0) { 1235 device_printf(sc->sc_dev, 1236 "%s: could not create TX buf DMA tag, error %d\n", 1237 __func__, error); 1238 goto fail; 1239 } 1240 1241 paddr = ring->cmd_dma.paddr; 1242 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1243 struct wpi_tx_data *data = &ring->data[i]; 1244 1245 data->cmd_paddr = paddr; 1246 paddr += sizeof (struct wpi_tx_cmd); 1247 1248 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1249 if (error != 0) { 1250 device_printf(sc->sc_dev, 1251 "%s: could not create TX buf DMA map, error %d\n", 1252 __func__, error); 1253 goto fail; 1254 } 1255 } 1256 1257 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1258 1259 return 0; 1260 1261 fail: wpi_free_tx_ring(sc, ring); 1262 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1263 return error; 1264 } 1265 1266 static void 1267 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1268 { 1269 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1270 } 1271 1272 static void 1273 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1274 { 1275 1276 if (ring->update != 0) { 1277 /* Wait for INT_WAKEUP event. 
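		 * The deferred write pointer update is performed once the
		 * NIC signals that it is awake again; see wpi_wakeup_intr().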
*/ 1278 return; 1279 } 1280 1281 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1282 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1283 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1284 __func__, ring->qid); 1285 ring->update = 1; 1286 } else { 1287 wpi_update_tx_ring(sc, ring); 1288 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1289 } 1290 } 1291 1292 static void 1293 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1294 { 1295 int i; 1296 1297 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1298 1299 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1300 struct wpi_tx_data *data = &ring->data[i]; 1301 1302 if (data->m != NULL) { 1303 bus_dmamap_sync(ring->data_dmat, data->map, 1304 BUS_DMASYNC_POSTWRITE); 1305 bus_dmamap_unload(ring->data_dmat, data->map); 1306 m_freem(data->m); 1307 data->m = NULL; 1308 } 1309 if (data->ni != NULL) { 1310 ieee80211_free_node(data->ni); 1311 data->ni = NULL; 1312 } 1313 } 1314 /* Clear TX descriptors. */ 1315 memset(ring->desc, 0, ring->desc_dma.size); 1316 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1317 BUS_DMASYNC_PREWRITE); 1318 sc->qfullmsk &= ~(1 << ring->qid); 1319 ring->queued = 0; 1320 ring->cur = 0; 1321 ring->update = 0; 1322 } 1323 1324 static void 1325 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1326 { 1327 int i; 1328 1329 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1330 1331 wpi_dma_contig_free(&ring->desc_dma); 1332 wpi_dma_contig_free(&ring->cmd_dma); 1333 1334 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1335 struct wpi_tx_data *data = &ring->data[i]; 1336 1337 if (data->m != NULL) { 1338 bus_dmamap_sync(ring->data_dmat, data->map, 1339 BUS_DMASYNC_POSTWRITE); 1340 bus_dmamap_unload(ring->data_dmat, data->map); 1341 m_freem(data->m); 1342 } 1343 if (data->map != NULL) 1344 bus_dmamap_destroy(ring->data_dmat, data->map); 1345 } 1346 if (ring->data_dmat != NULL) { 1347 bus_dma_tag_destroy(ring->data_dmat); 1348 ring->data_dmat = NULL; 1349 } 1350 } 1351 1352 /* 1353 * Extract various information from EEPROM. 1354 */ 1355 static int 1356 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1357 { 1358 #define WPI_CHK(res) do { \ 1359 if ((error = res) != 0) \ 1360 goto fail; \ 1361 } while (0) 1362 int error, i; 1363 1364 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1365 1366 /* Adapter has to be powered on for EEPROM access to work. */ 1367 if ((error = wpi_apm_init(sc)) != 0) { 1368 device_printf(sc->sc_dev, 1369 "%s: could not power ON adapter, error %d\n", __func__, 1370 error); 1371 return error; 1372 } 1373 1374 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1375 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1376 error = EIO; 1377 goto fail; 1378 } 1379 /* Clear HW ownership of EEPROM. */ 1380 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1381 1382 /* Read the hardware capabilities, revision and SKU type. */ 1383 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1384 sizeof(sc->cap))); 1385 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1386 sizeof(sc->rev))); 1387 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1388 sizeof(sc->type))); 1389 1390 sc->rev = le16toh(sc->rev); 1391 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1392 sc->rev, sc->type); 1393 1394 /* Read the regulatory domain (4 ASCII characters.) 
*/ 1395 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1396 sizeof(sc->domain))); 1397 1398 /* Read MAC address. */ 1399 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1400 IEEE80211_ADDR_LEN)); 1401 1402 /* Read the list of authorized channels. */ 1403 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1404 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1405 1406 /* Read the list of TX power groups. */ 1407 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1408 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1409 1410 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1411 1412 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1413 __func__); 1414 1415 return error; 1416 #undef WPI_CHK 1417 } 1418 1419 /* 1420 * Translate EEPROM flags to net80211. 1421 */ 1422 static uint32_t 1423 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1424 { 1425 uint32_t nflags; 1426 1427 nflags = 0; 1428 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1429 nflags |= IEEE80211_CHAN_PASSIVE; 1430 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1431 nflags |= IEEE80211_CHAN_NOADHOC; 1432 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1433 nflags |= IEEE80211_CHAN_DFS; 1434 /* XXX apparently IBSS may still be marked */ 1435 nflags |= IEEE80211_CHAN_NOADHOC; 1436 } 1437 1438 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1439 if (nflags & IEEE80211_CHAN_NOADHOC) 1440 nflags |= IEEE80211_CHAN_NOHOSTAP; 1441 1442 return nflags; 1443 } 1444 1445 static void 1446 wpi_read_eeprom_band(struct wpi_softc *sc, int n) 1447 { 1448 struct ifnet *ifp = sc->sc_ifp; 1449 struct ieee80211com *ic = ifp->if_l2com; 1450 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1451 const struct wpi_chan_band *band = &wpi_bands[n]; 1452 struct ieee80211_channel *c; 1453 uint8_t chan; 1454 int i, nflags; 1455 1456 for (i = 0; i < band->nchan; i++) { 1457 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1458 DPRINTF(sc, WPI_DEBUG_EEPROM, 1459 "Channel Not Valid: %d, band %d\n", 1460 band->chan[i],n); 1461 continue; 1462 } 1463 1464 chan = band->chan[i]; 1465 nflags = wpi_eeprom_channel_flags(&channels[i]); 1466 1467 c = &ic->ic_channels[ic->ic_nchans++]; 1468 c->ic_ieee = chan; 1469 c->ic_maxregpower = channels[i].maxpwr; 1470 c->ic_maxpower = 2*c->ic_maxregpower; 1471 1472 if (n == 0) { /* 2GHz band */ 1473 c->ic_freq = ieee80211_ieee2mhz(chan, 1474 IEEE80211_CHAN_G); 1475 1476 /* G =>'s B is supported */ 1477 c->ic_flags = IEEE80211_CHAN_B | nflags; 1478 c = &ic->ic_channels[ic->ic_nchans++]; 1479 c[0] = c[-1]; 1480 c->ic_flags = IEEE80211_CHAN_G | nflags; 1481 } else { /* 5GHz band */ 1482 c->ic_freq = ieee80211_ieee2mhz(chan, 1483 IEEE80211_CHAN_A); 1484 1485 c->ic_flags = IEEE80211_CHAN_A | nflags; 1486 } 1487 1488 /* Save maximum allowed TX power for this channel. */ 1489 sc->maxpwr[chan] = channels[i].maxpwr; 1490 1491 DPRINTF(sc, WPI_DEBUG_EEPROM, 1492 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1493 " offset %d\n", chan, c->ic_freq, 1494 channels[i].flags, sc->maxpwr[chan], 1495 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1496 } 1497 } 1498 1499 /** 1500 * Read the eeprom to find out what channels are valid for the given 1501 * band and update net80211 with what we find. 
1502 */ 1503 static int 1504 wpi_read_eeprom_channels(struct wpi_softc *sc, int n) 1505 { 1506 struct ifnet *ifp = sc->sc_ifp; 1507 struct ieee80211com *ic = ifp->if_l2com; 1508 const struct wpi_chan_band *band = &wpi_bands[n]; 1509 int error; 1510 1511 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1512 1513 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1514 band->nchan * sizeof (struct wpi_eeprom_chan)); 1515 if (error != 0) { 1516 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1517 return error; 1518 } 1519 1520 wpi_read_eeprom_band(sc, n); 1521 1522 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1523 1524 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1525 1526 return 0; 1527 } 1528 1529 static struct wpi_eeprom_chan * 1530 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1531 { 1532 int i, j; 1533 1534 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1535 for (i = 0; i < wpi_bands[j].nchan; i++) 1536 if (wpi_bands[j].chan[i] == c->ic_ieee) 1537 return &sc->eeprom_channels[j][i]; 1538 1539 return NULL; 1540 } 1541 1542 /* 1543 * Enforce flags read from EEPROM. 1544 */ 1545 static int 1546 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1547 int nchan, struct ieee80211_channel chans[]) 1548 { 1549 struct ifnet *ifp = ic->ic_ifp; 1550 struct wpi_softc *sc = ifp->if_softc; 1551 int i; 1552 1553 for (i = 0; i < nchan; i++) { 1554 struct ieee80211_channel *c = &chans[i]; 1555 struct wpi_eeprom_chan *channel; 1556 1557 channel = wpi_find_eeprom_channel(sc, c); 1558 if (channel == NULL) { 1559 if_printf(ic->ic_ifp, 1560 "%s: invalid channel %u freq %u/0x%x\n", 1561 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1562 return EINVAL; 1563 } 1564 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1565 } 1566 1567 return 0; 1568 } 1569 1570 static int 1571 wpi_read_eeprom_group(struct wpi_softc *sc, int n) 1572 { 1573 struct wpi_power_group *group = &sc->groups[n]; 1574 struct wpi_eeprom_group rgroup; 1575 int i, error; 1576 1577 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1578 1579 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1580 &rgroup, sizeof rgroup)) != 0) { 1581 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1582 return error; 1583 } 1584 1585 /* Save TX power group information. */ 1586 group->chan = rgroup.chan; 1587 group->maxpwr = rgroup.maxpwr; 1588 /* Retrieve temperature at which the samples were taken. 
*/ 1589 group->temp = (int16_t)le16toh(rgroup.temp); 1590 1591 DPRINTF(sc, WPI_DEBUG_EEPROM, 1592 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1593 group->maxpwr, group->temp); 1594 1595 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1596 group->samples[i].index = rgroup.samples[i].index; 1597 group->samples[i].power = rgroup.samples[i].power; 1598 1599 DPRINTF(sc, WPI_DEBUG_EEPROM, 1600 "\tsample %d: index=%d power=%d\n", i, 1601 group->samples[i].index, group->samples[i].power); 1602 } 1603 1604 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1605 1606 return 0; 1607 } 1608 1609 static int 1610 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1611 { 1612 int newid = WPI_ID_IBSS_MIN; 1613 1614 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1615 if ((sc->nodesmsk & (1 << newid)) == 0) { 1616 sc->nodesmsk |= 1 << newid; 1617 return newid; 1618 } 1619 } 1620 1621 return WPI_ID_UNDEFINED; 1622 } 1623 1624 static __inline int 1625 wpi_add_node_entry_sta(struct wpi_softc *sc) 1626 { 1627 sc->nodesmsk |= 1 << WPI_ID_BSS; 1628 1629 return WPI_ID_BSS; 1630 } 1631 1632 static __inline int 1633 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1634 { 1635 if (id == WPI_ID_UNDEFINED) 1636 return 0; 1637 1638 return (sc->nodesmsk >> id) & 1; 1639 } 1640 1641 static __inline void 1642 wpi_clear_node_table(struct wpi_softc *sc) 1643 { 1644 sc->nodesmsk = 0; 1645 } 1646 1647 static __inline void 1648 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1649 { 1650 sc->nodesmsk &= ~(1 << id); 1651 } 1652 1653 static struct ieee80211_node * 1654 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1655 { 1656 struct wpi_node *wn; 1657 1658 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1659 M_NOWAIT | M_ZERO); 1660 1661 if (wn == NULL) 1662 return NULL; 1663 1664 wn->id = WPI_ID_UNDEFINED; 1665 1666 return &wn->ni; 1667 } 1668 1669 static void 1670 wpi_node_free(struct ieee80211_node *ni) 1671 { 1672 struct ieee80211com *ic = ni->ni_ic; 1673 struct wpi_softc *sc = ic->ic_ifp->if_softc; 1674 struct wpi_node *wn = WPI_NODE(ni); 1675 1676 if (wn->id != WPI_ID_UNDEFINED) { 1677 WPI_NT_LOCK(sc); 1678 if (wpi_check_node_entry(sc, wn->id)) { 1679 wpi_del_node_entry(sc, wn->id); 1680 wpi_del_node(sc, ni); 1681 } 1682 WPI_NT_UNLOCK(sc); 1683 } 1684 1685 sc->sc_node_free(ni); 1686 } 1687 1688 static __inline int 1689 wpi_check_bss_filter(struct wpi_softc *sc) 1690 { 1691 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1692 } 1693 1694 static void 1695 wpi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, int rssi, 1696 int nf) 1697 { 1698 struct ieee80211vap *vap = ni->ni_vap; 1699 struct wpi_softc *sc = vap->iv_ic->ic_ifp->if_softc; 1700 struct wpi_vap *wvp = WPI_VAP(vap); 1701 uint64_t ni_tstamp, rx_tstamp; 1702 1703 wvp->wv_recv_mgmt(ni, m, subtype, rssi, nf); 1704 1705 if (vap->iv_opmode == IEEE80211_M_IBSS && 1706 vap->iv_state == IEEE80211_S_RUN && 1707 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1708 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1709 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1710 rx_tstamp = le64toh(sc->rx_tstamp); 1711 1712 if (ni_tstamp >= rx_tstamp) { 1713 DPRINTF(sc, WPI_DEBUG_STATE, 1714 "ibss merge, tsf %ju tstamp %ju\n", 1715 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1716 (void) ieee80211_ibss_merge(ni); 1717 } 1718 } 1719 } 1720 1721 static void 1722 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1723 { 1724 struct wpi_softc *sc = arg; 1725 struct wpi_node *wn = WPI_NODE(ni); 1726 int error; 1727 
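	/*
	 * If this node had a firmware table entry, re-add it with
	 * wpi_add_ibss_node(); wpi_restore_node_table() calls this for
	 * every node after the RXON has been re-sent.
	 */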
1728 WPI_NT_LOCK(sc); 1729 if (wn->id != WPI_ID_UNDEFINED) { 1730 wn->id = WPI_ID_UNDEFINED; 1731 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1732 device_printf(sc->sc_dev, 1733 "%s: could not add IBSS node, error %d\n", 1734 __func__, error); 1735 } 1736 } 1737 WPI_NT_UNLOCK(sc); 1738 } 1739 1740 static void 1741 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1742 { 1743 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1744 1745 /* Set group keys once. */ 1746 WPI_NT_LOCK(sc); 1747 wvp->wv_gtk = 0; 1748 WPI_NT_UNLOCK(sc); 1749 1750 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1751 ieee80211_crypto_reload_keys(ic); 1752 } 1753 1754 /** 1755 * Called by net80211 when ever there is a change to 80211 state machine 1756 */ 1757 static int 1758 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1759 { 1760 struct wpi_vap *wvp = WPI_VAP(vap); 1761 struct ieee80211com *ic = vap->iv_ic; 1762 struct ifnet *ifp = ic->ic_ifp; 1763 struct wpi_softc *sc = ifp->if_softc; 1764 int error = 0; 1765 1766 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1767 1768 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1769 ieee80211_state_name[vap->iv_state], 1770 ieee80211_state_name[nstate]); 1771 1772 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1773 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1774 device_printf(sc->sc_dev, 1775 "%s: could not set power saving level\n", 1776 __func__); 1777 return error; 1778 } 1779 1780 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1781 } 1782 1783 switch (nstate) { 1784 case IEEE80211_S_SCAN: 1785 WPI_RXON_LOCK(sc); 1786 if (wpi_check_bss_filter(sc) != 0) { 1787 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1788 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1789 device_printf(sc->sc_dev, 1790 "%s: could not send RXON\n", __func__); 1791 } 1792 } 1793 WPI_RXON_UNLOCK(sc); 1794 break; 1795 1796 case IEEE80211_S_ASSOC: 1797 if (vap->iv_state != IEEE80211_S_RUN) 1798 break; 1799 /* FALLTHROUGH */ 1800 case IEEE80211_S_AUTH: 1801 /* 1802 * NB: do not optimize AUTH -> AUTH state transmission - 1803 * this will break powersave with non-QoS AP! 1804 */ 1805 1806 /* 1807 * The node must be registered in the firmware before auth. 1808 * Also the associd must be cleared on RUN -> ASSOC 1809 * transitions. 1810 */ 1811 if ((error = wpi_auth(sc, vap)) != 0) { 1812 device_printf(sc->sc_dev, 1813 "%s: could not move to AUTH state, error %d\n", 1814 __func__, error); 1815 } 1816 break; 1817 1818 case IEEE80211_S_RUN: 1819 /* 1820 * RUN -> RUN transition: 1821 * STA mode: Just restart the timers. 1822 * IBSS mode: Process IBSS merge. 1823 */ 1824 if (vap->iv_state == IEEE80211_S_RUN) { 1825 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1826 WPI_RXON_LOCK(sc); 1827 wpi_calib_timeout(sc); 1828 WPI_RXON_UNLOCK(sc); 1829 break; 1830 } else { 1831 /* 1832 * Drop the BSS_FILTER bit 1833 * (there is no another way to change bssid). 1834 */ 1835 WPI_RXON_LOCK(sc); 1836 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1837 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1838 device_printf(sc->sc_dev, 1839 "%s: could not send RXON\n", 1840 __func__); 1841 } 1842 WPI_RXON_UNLOCK(sc); 1843 1844 /* Restore all what was lost. */ 1845 wpi_restore_node_table(sc, wvp); 1846 1847 /* XXX set conditionally? */ 1848 wpi_updateedca(ic); 1849 } 1850 } 1851 1852 /* 1853 * !RUN -> RUN requires setting the association id 1854 * which is done with a firmware cmd. We also defer 1855 * starting the timers until that work is done. 
1856 */ 1857 if ((error = wpi_run(sc, vap)) != 0) { 1858 device_printf(sc->sc_dev, 1859 "%s: could not move to RUN state\n", __func__); 1860 } 1861 break; 1862 1863 default: 1864 break; 1865 } 1866 if (error != 0) { 1867 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1868 return error; 1869 } 1870 1871 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1872 1873 return wvp->wv_newstate(vap, nstate, arg); 1874 } 1875 1876 static void 1877 wpi_calib_timeout(void *arg) 1878 { 1879 struct wpi_softc *sc = arg; 1880 1881 if (wpi_check_bss_filter(sc) == 0) 1882 return; 1883 1884 wpi_power_calibration(sc); 1885 1886 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1887 } 1888 1889 static __inline uint8_t 1890 rate2plcp(const uint8_t rate) 1891 { 1892 switch (rate) { 1893 case 12: return 0xd; 1894 case 18: return 0xf; 1895 case 24: return 0x5; 1896 case 36: return 0x7; 1897 case 48: return 0x9; 1898 case 72: return 0xb; 1899 case 96: return 0x1; 1900 case 108: return 0x3; 1901 case 2: return 10; 1902 case 4: return 20; 1903 case 11: return 55; 1904 case 22: return 110; 1905 default: return 0; 1906 } 1907 } 1908 1909 static __inline uint8_t 1910 plcp2rate(const uint8_t plcp) 1911 { 1912 switch (plcp) { 1913 case 0xd: return 12; 1914 case 0xf: return 18; 1915 case 0x5: return 24; 1916 case 0x7: return 36; 1917 case 0x9: return 48; 1918 case 0xb: return 72; 1919 case 0x1: return 96; 1920 case 0x3: return 108; 1921 case 10: return 2; 1922 case 20: return 4; 1923 case 55: return 11; 1924 case 110: return 22; 1925 default: return 0; 1926 } 1927 } 1928 1929 /* Quickly determine if a given rate is CCK or OFDM. */ 1930 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1931 1932 static void 1933 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1934 struct wpi_rx_data *data) 1935 { 1936 struct ifnet *ifp = sc->sc_ifp; 1937 struct ieee80211com *ic = ifp->if_l2com; 1938 struct wpi_rx_ring *ring = &sc->rxq; 1939 struct wpi_rx_stat *stat; 1940 struct wpi_rx_head *head; 1941 struct wpi_rx_tail *tail; 1942 struct ieee80211_frame *wh; 1943 struct ieee80211_node *ni; 1944 struct mbuf *m, *m1; 1945 bus_addr_t paddr; 1946 uint32_t flags; 1947 uint16_t len; 1948 int error; 1949 1950 stat = (struct wpi_rx_stat *)(desc + 1); 1951 1952 if (stat->len > WPI_STAT_MAXLEN) { 1953 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1954 goto fail1; 1955 } 1956 1957 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1958 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1959 len = le16toh(head->len); 1960 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1961 flags = le32toh(tail->flags); 1962 1963 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1964 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1965 le32toh(desc->len), len, (int8_t)stat->rssi, 1966 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1967 1968 /* Discard frames with a bad FCS early. */ 1969 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1970 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1971 __func__, flags); 1972 goto fail1; 1973 } 1974 /* Discard frames that are too short. 
*/ 1975 if (len < sizeof (struct ieee80211_frame_ack)) { 1976 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1977 __func__, len); 1978 goto fail1; 1979 } 1980 1981 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1982 if (m1 == NULL) { 1983 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1984 __func__); 1985 goto fail1; 1986 } 1987 bus_dmamap_unload(ring->data_dmat, data->map); 1988 1989 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1990 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1991 if (error != 0 && error != EFBIG) { 1992 device_printf(sc->sc_dev, 1993 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1994 m_freem(m1); 1995 1996 /* Try to reload the old mbuf. */ 1997 error = bus_dmamap_load(ring->data_dmat, data->map, 1998 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1999 &paddr, BUS_DMA_NOWAIT); 2000 if (error != 0 && error != EFBIG) { 2001 panic("%s: could not load old RX mbuf", __func__); 2002 } 2003 /* Physical address may have changed. */ 2004 ring->desc[ring->cur] = htole32(paddr); 2005 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2006 BUS_DMASYNC_PREWRITE); 2007 goto fail1; 2008 } 2009 2010 m = data->m; 2011 data->m = m1; 2012 /* Update RX descriptor. */ 2013 ring->desc[ring->cur] = htole32(paddr); 2014 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2015 BUS_DMASYNC_PREWRITE); 2016 2017 /* Finalize mbuf. */ 2018 m->m_pkthdr.rcvif = ifp; 2019 m->m_data = (caddr_t)(head + 1); 2020 m->m_pkthdr.len = m->m_len = len; 2021 2022 /* Grab a reference to the source node. */ 2023 wh = mtod(m, struct ieee80211_frame *); 2024 2025 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2026 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2027 /* Check whether decryption was successful or not. */ 2028 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2029 DPRINTF(sc, WPI_DEBUG_RECV, 2030 "CCMP decryption failed 0x%x\n", flags); 2031 goto fail2; 2032 } 2033 m->m_flags |= M_WEP; 2034 } 2035 2036 if (len >= sizeof(struct ieee80211_frame_min)) 2037 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2038 else 2039 ni = NULL; 2040 2041 sc->rx_tstamp = tail->tstamp; 2042 2043 if (ieee80211_radiotap_active(ic)) { 2044 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2045 2046 tap->wr_flags = 0; 2047 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2048 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2049 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2050 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2051 tap->wr_tsft = tail->tstamp; 2052 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2053 tap->wr_rate = plcp2rate(head->plcp); 2054 } 2055 2056 WPI_UNLOCK(sc); 2057 2058 /* Send the frame to the 802.11 layer. */ 2059 if (ni != NULL) { 2060 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2061 /* Node is no longer needed. 
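		 * ieee80211_find_rxnode() took a reference on the node and
		 * ieee80211_input() has consumed the frame, so release that
		 * reference here.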
*/ 2062 ieee80211_free_node(ni); 2063 } else 2064 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2065 2066 WPI_LOCK(sc); 2067 2068 return; 2069 2070 fail2: m_freem(m); 2071 2072 fail1: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2073 } 2074 2075 static void 2076 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2077 struct wpi_rx_data *data) 2078 { 2079 /* Ignore */ 2080 } 2081 2082 static void 2083 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2084 { 2085 struct ifnet *ifp = sc->sc_ifp; 2086 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2087 struct wpi_tx_data *data = &ring->data[desc->idx]; 2088 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2089 struct mbuf *m; 2090 struct ieee80211_node *ni; 2091 struct ieee80211vap *vap; 2092 struct ieee80211com *ic; 2093 uint32_t status = le32toh(stat->status); 2094 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2095 2096 KASSERT(data->ni != NULL, ("no node")); 2097 KASSERT(data->m != NULL, ("no mbuf")); 2098 2099 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2100 2101 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2102 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2103 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2104 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2105 2106 /* Unmap and free mbuf. */ 2107 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2108 bus_dmamap_unload(ring->data_dmat, data->map); 2109 m = data->m, data->m = NULL; 2110 ni = data->ni, data->ni = NULL; 2111 vap = ni->ni_vap; 2112 ic = vap->iv_ic; 2113 2114 /* 2115 * Update rate control statistics for the node. 2116 */ 2117 if (status & WPI_TX_STATUS_FAIL) { 2118 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2119 ieee80211_ratectl_tx_complete(vap, ni, 2120 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2121 } else { 2122 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2123 ieee80211_ratectl_tx_complete(vap, ni, 2124 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2125 } 2126 2127 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2128 2129 WPI_TXQ_STATE_LOCK(sc); 2130 ring->queued -= 1; 2131 if (ring->queued > 0) { 2132 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2133 2134 if (sc->qfullmsk != 0 && 2135 ring->queued < WPI_TX_RING_LOMARK) { 2136 sc->qfullmsk &= ~(1 << ring->qid); 2137 IF_LOCK(&ifp->if_snd); 2138 if (sc->qfullmsk == 0 && 2139 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2140 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2141 IF_UNLOCK(&ifp->if_snd); 2142 ieee80211_runtask(ic, &sc->sc_start_task); 2143 } else 2144 IF_UNLOCK(&ifp->if_snd); 2145 } 2146 } else 2147 callout_stop(&sc->tx_timeout); 2148 WPI_TXQ_STATE_UNLOCK(sc); 2149 2150 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2151 } 2152 2153 /* 2154 * Process a "command done" firmware notification. This is where we wakeup 2155 * processes waiting for a synchronous command completion. 2156 */ 2157 static void 2158 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2159 { 2160 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2161 struct wpi_tx_data *data; 2162 2163 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2164 "type %s len %d\n", desc->qid, desc->idx, 2165 desc->flags, wpi_cmd_str(desc->type), 2166 le32toh(desc->len)); 2167 2168 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2169 return; /* Not a command ack. 
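	 * Only descriptors whose qid matches the command queue complete a
	 * pending wpi_cmd(); unsolicited notifications (received frames,
	 * beacon misses, scan events, ...) carry other qid values and are
	 * handled by the switch in wpi_notif_intr() instead.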
*/ 2170 2171 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2172 2173 data = &ring->data[desc->idx]; 2174 2175 /* If the command was mapped in an mbuf, free it. */ 2176 if (data->m != NULL) { 2177 bus_dmamap_sync(ring->data_dmat, data->map, 2178 BUS_DMASYNC_POSTWRITE); 2179 bus_dmamap_unload(ring->data_dmat, data->map); 2180 m_freem(data->m); 2181 data->m = NULL; 2182 } 2183 2184 wakeup(&ring->cmd[desc->idx]); 2185 2186 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2187 WPI_TXQ_LOCK(sc); 2188 if (sc->sc_flags & WPI_PS_PATH) { 2189 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2190 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2191 } else { 2192 sc->sc_update_rx_ring = wpi_update_rx_ring; 2193 sc->sc_update_tx_ring = wpi_update_tx_ring; 2194 } 2195 WPI_TXQ_UNLOCK(sc); 2196 } 2197 } 2198 2199 static void 2200 wpi_notif_intr(struct wpi_softc *sc) 2201 { 2202 struct ifnet *ifp = sc->sc_ifp; 2203 struct ieee80211com *ic = ifp->if_l2com; 2204 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2205 uint32_t hw; 2206 2207 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2208 BUS_DMASYNC_POSTREAD); 2209 2210 hw = le32toh(sc->shared->next) & 0xfff; 2211 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2212 2213 while (sc->rxq.cur != hw) { 2214 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2215 2216 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2217 struct wpi_rx_desc *desc; 2218 2219 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2220 BUS_DMASYNC_POSTREAD); 2221 desc = mtod(data->m, struct wpi_rx_desc *); 2222 2223 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2224 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2225 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2226 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2227 2228 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2229 /* Reply to a command. */ 2230 wpi_cmd_done(sc, desc); 2231 } 2232 2233 switch (desc->type) { 2234 case WPI_RX_DONE: 2235 /* An 802.11 frame has been received. */ 2236 wpi_rx_done(sc, desc, data); 2237 2238 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2239 /* wpi_stop() was called. */ 2240 return; 2241 } 2242 2243 break; 2244 2245 case WPI_TX_DONE: 2246 /* An 802.11 frame has been transmitted. 
*/ 2247 wpi_tx_done(sc, desc); 2248 break; 2249 2250 case WPI_RX_STATISTICS: 2251 case WPI_BEACON_STATISTICS: 2252 wpi_rx_statistics(sc, desc, data); 2253 break; 2254 2255 case WPI_BEACON_MISSED: 2256 { 2257 struct wpi_beacon_missed *miss = 2258 (struct wpi_beacon_missed *)(desc + 1); 2259 uint32_t expected, misses, received, threshold; 2260 2261 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2262 BUS_DMASYNC_POSTREAD); 2263 2264 misses = le32toh(miss->consecutive); 2265 expected = le32toh(miss->expected); 2266 received = le32toh(miss->received); 2267 threshold = MAX(2, vap->iv_bmissthreshold); 2268 2269 DPRINTF(sc, WPI_DEBUG_BMISS, 2270 "%s: beacons missed %u(%u) (received %u/%u)\n", 2271 __func__, misses, le32toh(miss->total), received, 2272 expected); 2273 2274 if (misses >= threshold || 2275 (received == 0 && expected >= threshold)) { 2276 WPI_RXON_LOCK(sc); 2277 if (callout_pending(&sc->scan_timeout)) { 2278 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2279 0, 1); 2280 } 2281 WPI_RXON_UNLOCK(sc); 2282 if (vap->iv_state == IEEE80211_S_RUN && 2283 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2284 ieee80211_beacon_miss(ic); 2285 } 2286 2287 break; 2288 } 2289 #ifdef WPI_DEBUG 2290 case WPI_BEACON_SENT: 2291 { 2292 struct wpi_tx_stat *stat = 2293 (struct wpi_tx_stat *)(desc + 1); 2294 uint64_t *tsf = (uint64_t *)(stat + 1); 2295 uint32_t *mode = (uint32_t *)(tsf + 1); 2296 2297 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2298 BUS_DMASYNC_POSTREAD); 2299 2300 DPRINTF(sc, WPI_DEBUG_BEACON, 2301 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2302 "duration %u, status %x, tsf %ju, mode %x\n", 2303 stat->rtsfailcnt, stat->ackfailcnt, 2304 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2305 le32toh(stat->status), *tsf, *mode); 2306 2307 break; 2308 } 2309 #endif 2310 case WPI_UC_READY: 2311 { 2312 struct wpi_ucode_info *uc = 2313 (struct wpi_ucode_info *)(desc + 1); 2314 2315 /* The microcontroller is ready. */ 2316 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2317 BUS_DMASYNC_POSTREAD); 2318 DPRINTF(sc, WPI_DEBUG_RESET, 2319 "microcode alive notification version=%d.%d " 2320 "subtype=%x alive=%x\n", uc->major, uc->minor, 2321 uc->subtype, le32toh(uc->valid)); 2322 2323 if (le32toh(uc->valid) != 1) { 2324 device_printf(sc->sc_dev, 2325 "microcontroller initialization failed\n"); 2326 wpi_stop_locked(sc); 2327 } 2328 /* Save the address of the error log in SRAM. 
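		 * wpi_fatal_intr() reads from this address to dump the
		 * firmware error log when a SW/HW error interrupt is raised
		 * later on.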
*/ 2329 sc->errptr = le32toh(uc->errptr); 2330 break; 2331 } 2332 case WPI_STATE_CHANGED: 2333 { 2334 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2335 BUS_DMASYNC_POSTREAD); 2336 2337 uint32_t *status = (uint32_t *)(desc + 1); 2338 2339 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2340 le32toh(*status)); 2341 2342 if (le32toh(*status) & 1) { 2343 WPI_NT_LOCK(sc); 2344 wpi_clear_node_table(sc); 2345 WPI_NT_UNLOCK(sc); 2346 taskqueue_enqueue(sc->sc_tq, 2347 &sc->sc_radiooff_task); 2348 return; 2349 } 2350 break; 2351 } 2352 #ifdef WPI_DEBUG 2353 case WPI_START_SCAN: 2354 { 2355 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2356 BUS_DMASYNC_POSTREAD); 2357 2358 struct wpi_start_scan *scan = 2359 (struct wpi_start_scan *)(desc + 1); 2360 DPRINTF(sc, WPI_DEBUG_SCAN, 2361 "%s: scanning channel %d status %x\n", 2362 __func__, scan->chan, le32toh(scan->status)); 2363 2364 break; 2365 } 2366 #endif 2367 case WPI_STOP_SCAN: 2368 { 2369 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2370 BUS_DMASYNC_POSTREAD); 2371 2372 struct wpi_stop_scan *scan = 2373 (struct wpi_stop_scan *)(desc + 1); 2374 2375 DPRINTF(sc, WPI_DEBUG_SCAN, 2376 "scan finished nchan=%d status=%d chan=%d\n", 2377 scan->nchan, scan->status, scan->chan); 2378 2379 WPI_RXON_LOCK(sc); 2380 callout_stop(&sc->scan_timeout); 2381 WPI_RXON_UNLOCK(sc); 2382 if (scan->status == WPI_SCAN_ABORTED) 2383 ieee80211_cancel_scan(vap); 2384 else 2385 ieee80211_scan_next(vap); 2386 break; 2387 } 2388 } 2389 2390 if (sc->rxq.cur % 8 == 0) { 2391 /* Tell the firmware what we have processed. */ 2392 sc->sc_update_rx_ring(sc); 2393 } 2394 } 2395 } 2396 2397 /* 2398 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2399 * from power-down sleep mode. 2400 */ 2401 static void 2402 wpi_wakeup_intr(struct wpi_softc *sc) 2403 { 2404 int qid; 2405 2406 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2407 "%s: ucode wakeup from power-down sleep\n", __func__); 2408 2409 /* Wakeup RX and TX rings. 
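	 * While the ucode was asleep, ring index updates were only noted in
	 * each ring's "update" flag; replay them now that the MAC is awake,
	 * then drop the access request again.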
*/ 2410 if (sc->rxq.update) { 2411 sc->rxq.update = 0; 2412 wpi_update_rx_ring(sc); 2413 } 2414 WPI_TXQ_LOCK(sc); 2415 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2416 struct wpi_tx_ring *ring = &sc->txq[qid]; 2417 2418 if (ring->update) { 2419 ring->update = 0; 2420 wpi_update_tx_ring(sc, ring); 2421 } 2422 } 2423 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2424 WPI_TXQ_UNLOCK(sc); 2425 } 2426 2427 /* 2428 * This function prints firmware registers 2429 */ 2430 #ifdef WPI_DEBUG 2431 static void 2432 wpi_debug_registers(struct wpi_softc *sc) 2433 { 2434 size_t i; 2435 static const uint32_t csr_tbl[] = { 2436 WPI_HW_IF_CONFIG, 2437 WPI_INT, 2438 WPI_INT_MASK, 2439 WPI_FH_INT, 2440 WPI_GPIO_IN, 2441 WPI_RESET, 2442 WPI_GP_CNTRL, 2443 WPI_EEPROM, 2444 WPI_EEPROM_GP, 2445 WPI_GIO, 2446 WPI_UCODE_GP1, 2447 WPI_UCODE_GP2, 2448 WPI_GIO_CHICKEN, 2449 WPI_ANA_PLL, 2450 WPI_DBG_HPET_MEM, 2451 }; 2452 static const uint32_t prph_tbl[] = { 2453 WPI_APMG_CLK_CTRL, 2454 WPI_APMG_PS, 2455 WPI_APMG_PCI_STT, 2456 WPI_APMG_RFKILL, 2457 }; 2458 2459 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2460 2461 for (i = 0; i < nitems(csr_tbl); i++) { 2462 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2463 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2464 2465 if ((i + 1) % 2 == 0) 2466 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2467 } 2468 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2469 2470 if (wpi_nic_lock(sc) == 0) { 2471 for (i = 0; i < nitems(prph_tbl); i++) { 2472 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2473 wpi_get_prph_string(prph_tbl[i]), 2474 wpi_prph_read(sc, prph_tbl[i])); 2475 2476 if ((i + 1) % 2 == 0) 2477 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2478 } 2479 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2480 wpi_nic_unlock(sc); 2481 } else { 2482 DPRINTF(sc, WPI_DEBUG_REGISTER, 2483 "Cannot access internal registers.\n"); 2484 } 2485 } 2486 #endif 2487 2488 /* 2489 * Dump the error log of the firmware when a firmware panic occurs. Although 2490 * we can't debug the firmware because it is neither open source nor free, it 2491 * can help us to identify certain classes of problems. 2492 */ 2493 static void 2494 wpi_fatal_intr(struct wpi_softc *sc) 2495 { 2496 struct wpi_fw_dump dump; 2497 uint32_t i, offset, count; 2498 2499 /* Check that the error log address is valid. */ 2500 if (sc->errptr < WPI_FW_DATA_BASE || 2501 sc->errptr + sizeof (dump) > 2502 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2503 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2504 sc->errptr); 2505 return; 2506 } 2507 if (wpi_nic_lock(sc) != 0) { 2508 printf("%s: could not read firmware error log\n", __func__); 2509 return; 2510 } 2511 /* Read number of entries in the log. */ 2512 count = wpi_mem_read(sc, sc->errptr); 2513 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2514 printf("%s: invalid count field (count = %u)\n", __func__, 2515 count); 2516 wpi_nic_unlock(sc); 2517 return; 2518 } 2519 /* Skip "count" field. */ 2520 offset = sc->errptr + sizeof (uint32_t); 2521 printf("firmware error log (count = %u):\n", count); 2522 for (i = 0; i < count; i++) { 2523 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2524 sizeof (dump) / sizeof (uint32_t)); 2525 2526 printf(" error type = \"%s\" (0x%08X)\n", 2527 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2528 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2529 dump.desc); 2530 printf(" error data = 0x%08X\n", 2531 dump.data); 2532 printf(" branch link = 0x%08X%08X\n", 2533 dump.blink[0], dump.blink[1]); 2534 printf(" interrupt link = 0x%08X%08X\n", 2535 dump.ilink[0], dump.ilink[1]); 2536 printf(" time = %u\n", dump.time); 2537 2538 offset += sizeof (dump); 2539 } 2540 wpi_nic_unlock(sc); 2541 /* Dump driver status (TX and RX rings) while we're here. */ 2542 printf("driver status:\n"); 2543 WPI_TXQ_LOCK(sc); 2544 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2545 struct wpi_tx_ring *ring = &sc->txq[i]; 2546 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2547 i, ring->qid, ring->cur, ring->queued); 2548 } 2549 WPI_TXQ_UNLOCK(sc); 2550 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2551 } 2552 2553 static void 2554 wpi_intr(void *arg) 2555 { 2556 struct wpi_softc *sc = arg; 2557 struct ifnet *ifp = sc->sc_ifp; 2558 uint32_t r1, r2; 2559 2560 WPI_LOCK(sc); 2561 2562 /* Disable interrupts. */ 2563 WPI_WRITE(sc, WPI_INT_MASK, 0); 2564 2565 r1 = WPI_READ(sc, WPI_INT); 2566 2567 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2568 goto end; /* Hardware gone! */ 2569 2570 r2 = WPI_READ(sc, WPI_FH_INT); 2571 2572 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2573 r1, r2); 2574 2575 if (r1 == 0 && r2 == 0) 2576 goto done; /* Interrupt not for us. */ 2577 2578 /* Acknowledge interrupts. */ 2579 WPI_WRITE(sc, WPI_INT, r1); 2580 WPI_WRITE(sc, WPI_FH_INT, r2); 2581 2582 if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) { 2583 device_printf(sc->sc_dev, "fatal firmware error\n"); 2584 #ifdef WPI_DEBUG 2585 wpi_debug_registers(sc); 2586 #endif 2587 wpi_fatal_intr(sc); 2588 DPRINTF(sc, WPI_DEBUG_HW, 2589 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2590 "(Hardware Error)"); 2591 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2592 goto end; 2593 } 2594 2595 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2596 (r2 & WPI_FH_INT_RX)) 2597 wpi_notif_intr(sc); 2598 2599 if (r1 & WPI_INT_ALIVE) 2600 wakeup(sc); /* Firmware is alive. */ 2601 2602 if (r1 & WPI_INT_WAKEUP) 2603 wpi_wakeup_intr(sc); 2604 2605 done: 2606 /* Re-enable interrupts. */ 2607 if (ifp->if_flags & IFF_UP) 2608 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2609 2610 end: WPI_UNLOCK(sc); 2611 } 2612 2613 static int 2614 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2615 { 2616 struct ifnet *ifp = sc->sc_ifp; 2617 struct ieee80211_frame *wh; 2618 struct wpi_tx_cmd *cmd; 2619 struct wpi_tx_data *data; 2620 struct wpi_tx_desc *desc; 2621 struct wpi_tx_ring *ring; 2622 struct mbuf *m1; 2623 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2624 int error, i, hdrlen, nsegs, totlen, pad; 2625 2626 WPI_TXQ_LOCK(sc); 2627 2628 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2629 2630 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2631 2632 if (sc->txq_active == 0) { 2633 /* wpi_stop() was called */ 2634 error = ENETDOWN; 2635 goto fail; 2636 } 2637 2638 wh = mtod(buf->m, struct ieee80211_frame *); 2639 hdrlen = ieee80211_anyhdrsize(wh); 2640 totlen = buf->m->m_pkthdr.len; 2641 2642 if (hdrlen & 3) { 2643 /* First segment length must be a multiple of 4. */ 2644 pad = 4 - (hdrlen & 3); 2645 } else 2646 pad = 0; 2647 2648 ring = &sc->txq[buf->ac]; 2649 desc = &ring->desc[ring->cur]; 2650 data = &ring->data[ring->cur]; 2651 2652 /* Prepare TX firmware command. 
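	 * The command header, the wpi_cmd_data passed in by the caller and
	 * the saved (padded) 802.11 header all live in the pre-mapped command
	 * slot, so a single DMA segment describes them:
	 * segs[0].len = 4 + buf->size + hdrlen + pad below.  The mbuf payload
	 * then follows in segs[1..nsegs].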
*/ 2653 cmd = &ring->cmd[ring->cur]; 2654 cmd->code = buf->code; 2655 cmd->flags = 0; 2656 cmd->qid = ring->qid; 2657 cmd->idx = ring->cur; 2658 2659 memcpy(cmd->data, buf->data, buf->size); 2660 2661 /* Save and trim IEEE802.11 header. */ 2662 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2663 m_adj(buf->m, hdrlen); 2664 2665 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2666 segs, &nsegs, BUS_DMA_NOWAIT); 2667 if (error != 0 && error != EFBIG) { 2668 device_printf(sc->sc_dev, 2669 "%s: can't map mbuf (error %d)\n", __func__, error); 2670 goto fail; 2671 } 2672 if (error != 0) { 2673 /* Too many DMA segments, linearize mbuf. */ 2674 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2675 if (m1 == NULL) { 2676 device_printf(sc->sc_dev, 2677 "%s: could not defrag mbuf\n", __func__); 2678 error = ENOBUFS; 2679 goto fail; 2680 } 2681 buf->m = m1; 2682 2683 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2684 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2685 if (error != 0) { 2686 device_printf(sc->sc_dev, 2687 "%s: can't map mbuf (error %d)\n", __func__, 2688 error); 2689 goto fail; 2690 } 2691 } 2692 2693 KASSERT(nsegs < WPI_MAX_SCATTER, 2694 ("too many DMA segments, nsegs (%d) should be less than %d", 2695 nsegs, WPI_MAX_SCATTER)); 2696 2697 data->m = buf->m; 2698 data->ni = buf->ni; 2699 2700 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2701 __func__, ring->qid, ring->cur, totlen, nsegs); 2702 2703 /* Fill TX descriptor. */ 2704 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2705 /* First DMA segment is used by the TX command. */ 2706 desc->segs[0].addr = htole32(data->cmd_paddr); 2707 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2708 /* Other DMA segments are for data payload. */ 2709 seg = &segs[0]; 2710 for (i = 1; i <= nsegs; i++) { 2711 desc->segs[i].addr = htole32(seg->ds_addr); 2712 desc->segs[i].len = htole32(seg->ds_len); 2713 seg++; 2714 } 2715 2716 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2717 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2718 BUS_DMASYNC_PREWRITE); 2719 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2720 BUS_DMASYNC_PREWRITE); 2721 2722 /* Kick TX ring. */ 2723 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2724 sc->sc_update_tx_ring(sc, ring); 2725 2726 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2727 /* Mark TX ring as full if we reach a certain threshold. */ 2728 WPI_TXQ_STATE_LOCK(sc); 2729 if (++ring->queued > WPI_TX_RING_HIMARK) { 2730 sc->qfullmsk |= 1 << ring->qid; 2731 2732 IF_LOCK(&ifp->if_snd); 2733 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2734 IF_UNLOCK(&ifp->if_snd); 2735 } 2736 2737 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2738 WPI_TXQ_STATE_UNLOCK(sc); 2739 } 2740 2741 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2742 2743 WPI_TXQ_UNLOCK(sc); 2744 2745 return 0; 2746 2747 fail: m_freem(buf->m); 2748 2749 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2750 2751 WPI_TXQ_UNLOCK(sc); 2752 2753 return error; 2754 } 2755 2756 /* 2757 * Construct the data packet for a transmit buffer. 
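 * This builds the struct wpi_cmd_data for the frame (TX rate, flags,
 * destination node id, cipher key, retry counts), then hands it to
 * wpi_cmd2(), which maps the mbuf and queues a WPI_CMD_TX_DATA command on
 * the ring selected by the frame's access category.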
2758 */ 2759 static int 2760 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2761 { 2762 const struct ieee80211_txparam *tp; 2763 struct ieee80211vap *vap = ni->ni_vap; 2764 struct ieee80211com *ic = ni->ni_ic; 2765 struct wpi_node *wn = WPI_NODE(ni); 2766 struct ieee80211_channel *chan; 2767 struct ieee80211_frame *wh; 2768 struct ieee80211_key *k = NULL; 2769 struct wpi_buf tx_data; 2770 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2771 uint32_t flags; 2772 uint16_t qos; 2773 uint8_t tid, type; 2774 int ac, error, swcrypt, rate, ismcast, totlen; 2775 2776 wh = mtod(m, struct ieee80211_frame *); 2777 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2778 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2779 2780 /* Select EDCA Access Category and TX ring for this frame. */ 2781 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2782 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2783 tid = qos & IEEE80211_QOS_TID; 2784 } else { 2785 qos = 0; 2786 tid = 0; 2787 } 2788 ac = M_WME_GETAC(m); 2789 2790 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2791 ni->ni_chan : ic->ic_curchan; 2792 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2793 2794 /* Choose a TX rate index. */ 2795 if (type == IEEE80211_FC0_TYPE_MGT) 2796 rate = tp->mgmtrate; 2797 else if (ismcast) 2798 rate = tp->mcastrate; 2799 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2800 rate = tp->ucastrate; 2801 else if (m->m_flags & M_EAPOL) 2802 rate = tp->mgmtrate; 2803 else { 2804 /* XXX pass pktlen */ 2805 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2806 rate = ni->ni_txrate; 2807 } 2808 2809 /* Encrypt the frame if need be. */ 2810 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2811 /* Retrieve key for TX. */ 2812 k = ieee80211_crypto_encap(ni, m); 2813 if (k == NULL) { 2814 error = ENOBUFS; 2815 goto fail; 2816 } 2817 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2818 2819 /* 802.11 header may have moved. */ 2820 wh = mtod(m, struct ieee80211_frame *); 2821 } 2822 totlen = m->m_pkthdr.len; 2823 2824 if (ieee80211_radiotap_active_vap(vap)) { 2825 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2826 2827 tap->wt_flags = 0; 2828 tap->wt_rate = rate; 2829 if (k != NULL) 2830 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2831 2832 ieee80211_radiotap_tx(vap, m); 2833 } 2834 2835 flags = 0; 2836 if (!ismcast) { 2837 /* Unicast frame, check if an ACK is expected. */ 2838 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2839 IEEE80211_QOS_ACKPOLICY_NOACK) 2840 flags |= WPI_TX_NEED_ACK; 2841 } 2842 2843 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2844 flags |= WPI_TX_AUTO_SEQ; 2845 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2846 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2847 2848 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2849 if (!ismcast) { 2850 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2851 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2852 flags |= WPI_TX_NEED_RTS; 2853 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2854 WPI_RATE_IS_OFDM(rate)) { 2855 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2856 flags |= WPI_TX_NEED_CTS; 2857 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2858 flags |= WPI_TX_NEED_RTS; 2859 } 2860 2861 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2862 flags |= WPI_TX_FULL_TXOP; 2863 } 2864 2865 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2866 if (type == IEEE80211_FC0_TYPE_MGT) { 2867 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2868 2869 /* Tell HW to set timestamp in probe responses. 
*/ 2870 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2871 flags |= WPI_TX_INSERT_TSTAMP; 2872 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2873 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2874 tx->timeout = htole16(3); 2875 else 2876 tx->timeout = htole16(2); 2877 } 2878 2879 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2880 tx->id = WPI_ID_BROADCAST; 2881 else { 2882 if (wn->id == WPI_ID_UNDEFINED) { 2883 device_printf(sc->sc_dev, 2884 "%s: undefined node id\n", __func__); 2885 error = EINVAL; 2886 goto fail; 2887 } 2888 2889 tx->id = wn->id; 2890 } 2891 2892 if (k != NULL && !swcrypt) { 2893 switch (k->wk_cipher->ic_cipher) { 2894 case IEEE80211_CIPHER_AES_CCM: 2895 tx->security = WPI_CIPHER_CCMP; 2896 break; 2897 2898 default: 2899 break; 2900 } 2901 2902 memcpy(tx->key, k->wk_key, k->wk_keylen); 2903 } 2904 2905 tx->len = htole16(totlen); 2906 tx->flags = htole32(flags); 2907 tx->plcp = rate2plcp(rate); 2908 tx->tid = tid; 2909 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2910 tx->ofdm_mask = 0xff; 2911 tx->cck_mask = 0x0f; 2912 tx->rts_ntries = 7; 2913 tx->data_ntries = tp->maxretry; 2914 2915 tx_data.ni = ni; 2916 tx_data.m = m; 2917 tx_data.size = sizeof(struct wpi_cmd_data); 2918 tx_data.code = WPI_CMD_TX_DATA; 2919 tx_data.ac = ac; 2920 2921 return wpi_cmd2(sc, &tx_data); 2922 2923 fail: m_freem(m); 2924 return error; 2925 } 2926 2927 static int 2928 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2929 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2930 { 2931 struct ieee80211vap *vap = ni->ni_vap; 2932 struct ieee80211_key *k = NULL; 2933 struct ieee80211_frame *wh; 2934 struct wpi_buf tx_data; 2935 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2936 uint32_t flags; 2937 uint8_t type; 2938 int ac, rate, swcrypt, totlen; 2939 2940 wh = mtod(m, struct ieee80211_frame *); 2941 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2942 2943 ac = params->ibp_pri & 3; 2944 2945 /* Choose a TX rate index. */ 2946 rate = params->ibp_rate0; 2947 2948 flags = 0; 2949 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2950 flags |= WPI_TX_AUTO_SEQ; 2951 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2952 flags |= WPI_TX_NEED_ACK; 2953 if (params->ibp_flags & IEEE80211_BPF_RTS) 2954 flags |= WPI_TX_NEED_RTS; 2955 if (params->ibp_flags & IEEE80211_BPF_CTS) 2956 flags |= WPI_TX_NEED_CTS; 2957 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2958 flags |= WPI_TX_FULL_TXOP; 2959 2960 /* Encrypt the frame if need be. */ 2961 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2962 /* Retrieve key for TX. */ 2963 k = ieee80211_crypto_encap(ni, m); 2964 if (k == NULL) { 2965 m_freem(m); 2966 return ENOBUFS; 2967 } 2968 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2969 2970 /* 802.11 header may have moved. */ 2971 wh = mtod(m, struct ieee80211_frame *); 2972 } 2973 totlen = m->m_pkthdr.len; 2974 2975 if (ieee80211_radiotap_active_vap(vap)) { 2976 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2977 2978 tap->wt_flags = 0; 2979 tap->wt_rate = rate; 2980 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2981 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2982 2983 ieee80211_radiotap_tx(vap, m); 2984 } 2985 2986 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2987 if (type == IEEE80211_FC0_TYPE_MGT) { 2988 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2989 2990 /* Tell HW to set timestamp in probe responses. 
*/ 2991 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2992 flags |= WPI_TX_INSERT_TSTAMP; 2993 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2994 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2995 tx->timeout = htole16(3); 2996 else 2997 tx->timeout = htole16(2); 2998 } 2999 3000 if (k != NULL && !swcrypt) { 3001 switch (k->wk_cipher->ic_cipher) { 3002 case IEEE80211_CIPHER_AES_CCM: 3003 tx->security = WPI_CIPHER_CCMP; 3004 break; 3005 3006 default: 3007 break; 3008 } 3009 3010 memcpy(tx->key, k->wk_key, k->wk_keylen); 3011 } 3012 3013 tx->len = htole16(totlen); 3014 tx->flags = htole32(flags); 3015 tx->plcp = rate2plcp(rate); 3016 tx->id = WPI_ID_BROADCAST; 3017 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3018 tx->rts_ntries = params->ibp_try1; 3019 tx->data_ntries = params->ibp_try0; 3020 3021 tx_data.ni = ni; 3022 tx_data.m = m; 3023 tx_data.size = sizeof(struct wpi_cmd_data); 3024 tx_data.code = WPI_CMD_TX_DATA; 3025 tx_data.ac = ac; 3026 3027 return wpi_cmd2(sc, &tx_data); 3028 } 3029 3030 static int 3031 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3032 const struct ieee80211_bpf_params *params) 3033 { 3034 struct ieee80211com *ic = ni->ni_ic; 3035 struct ifnet *ifp = ic->ic_ifp; 3036 struct wpi_softc *sc = ifp->if_softc; 3037 int error = 0; 3038 3039 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3040 3041 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3042 ieee80211_free_node(ni); 3043 m_freem(m); 3044 return ENETDOWN; 3045 } 3046 3047 WPI_TX_LOCK(sc); 3048 if (params == NULL) { 3049 /* 3050 * Legacy path; interpret frame contents to decide 3051 * precisely how to send the frame. 3052 */ 3053 error = wpi_tx_data(sc, m, ni); 3054 } else { 3055 /* 3056 * Caller supplied explicit parameters to use in 3057 * sending the frame. 
3058 */ 3059 error = wpi_tx_data_raw(sc, m, ni, params); 3060 } 3061 WPI_TX_UNLOCK(sc); 3062 3063 if (error != 0) { 3064 /* NB: m is reclaimed on tx failure */ 3065 ieee80211_free_node(ni); 3066 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3067 3068 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3069 3070 return error; 3071 } 3072 3073 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3074 3075 return 0; 3076 } 3077 3078 /** 3079 * Process data waiting to be sent on the IFNET output queue 3080 */ 3081 static void 3082 wpi_start(struct ifnet *ifp) 3083 { 3084 struct wpi_softc *sc = ifp->if_softc; 3085 struct ieee80211_node *ni; 3086 struct mbuf *m; 3087 3088 WPI_TX_LOCK(sc); 3089 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3090 3091 for (;;) { 3092 IF_LOCK(&ifp->if_snd); 3093 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 3094 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3095 IF_UNLOCK(&ifp->if_snd); 3096 break; 3097 } 3098 IF_UNLOCK(&ifp->if_snd); 3099 3100 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 3101 if (m == NULL) 3102 break; 3103 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3104 if (wpi_tx_data(sc, m, ni) != 0) { 3105 ieee80211_free_node(ni); 3106 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3107 } 3108 } 3109 3110 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3111 WPI_TX_UNLOCK(sc); 3112 } 3113 3114 static void 3115 wpi_start_task(void *arg0, int pending) 3116 { 3117 struct wpi_softc *sc = arg0; 3118 struct ifnet *ifp = sc->sc_ifp; 3119 3120 wpi_start(ifp); 3121 } 3122 3123 static void 3124 wpi_watchdog_rfkill(void *arg) 3125 { 3126 struct wpi_softc *sc = arg; 3127 struct ifnet *ifp = sc->sc_ifp; 3128 struct ieee80211com *ic = ifp->if_l2com; 3129 3130 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3131 3132 /* No need to lock firmware memory. */ 3133 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3134 /* Radio kill switch is still off. */ 3135 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3136 sc); 3137 } else 3138 ieee80211_runtask(ic, &sc->sc_radioon_task); 3139 } 3140 3141 static void 3142 wpi_scan_timeout(void *arg) 3143 { 3144 struct wpi_softc *sc = arg; 3145 struct ifnet *ifp = sc->sc_ifp; 3146 3147 if_printf(ifp, "scan timeout\n"); 3148 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3149 } 3150 3151 static void 3152 wpi_tx_timeout(void *arg) 3153 { 3154 struct wpi_softc *sc = arg; 3155 struct ifnet *ifp = sc->sc_ifp; 3156 3157 if_printf(ifp, "device timeout\n"); 3158 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3159 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3160 } 3161 3162 static int 3163 wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3164 { 3165 struct wpi_softc *sc = ifp->if_softc; 3166 struct ieee80211com *ic = ifp->if_l2com; 3167 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3168 struct ifreq *ifr = (struct ifreq *) data; 3169 int error = 0; 3170 3171 switch (cmd) { 3172 case SIOCGIFADDR: 3173 error = ether_ioctl(ifp, cmd, data); 3174 break; 3175 case SIOCSIFFLAGS: 3176 if (ifp->if_flags & IFF_UP) { 3177 wpi_init(sc); 3178 3179 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 && 3180 vap != NULL) 3181 ieee80211_stop(vap); 3182 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3183 wpi_stop(sc); 3184 break; 3185 case SIOCGIFMEDIA: 3186 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3187 break; 3188 default: 3189 error = EINVAL; 3190 break; 3191 } 3192 return error; 3193 } 3194 3195 /* 3196 * Send a command to the firmware. 
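 * Commands small enough to fit in the ring slot's embedded buffer are sent
 * from there; larger ones are copied into a jumbo mbuf and mapped
 * separately.  Synchronous callers (async == 0) sleep on the command slot
 * until wpi_cmd_done() wakes them up or roughly one second passes.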
3197 */ 3198 static int 3199 wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3200 int async) 3201 { 3202 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3203 struct wpi_tx_desc *desc; 3204 struct wpi_tx_data *data; 3205 struct wpi_tx_cmd *cmd; 3206 struct mbuf *m; 3207 bus_addr_t paddr; 3208 int totlen, error; 3209 3210 WPI_TXQ_LOCK(sc); 3211 3212 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3213 3214 if (sc->txq_active == 0) { 3215 /* wpi_stop() was called */ 3216 error = 0; 3217 goto fail; 3218 } 3219 3220 if (async == 0) 3221 WPI_LOCK_ASSERT(sc); 3222 3223 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3224 __func__, wpi_cmd_str(code), size, async); 3225 3226 desc = &ring->desc[ring->cur]; 3227 data = &ring->data[ring->cur]; 3228 totlen = 4 + size; 3229 3230 if (size > sizeof cmd->data) { 3231 /* Command is too large to fit in a descriptor. */ 3232 if (totlen > MCLBYTES) { 3233 error = EINVAL; 3234 goto fail; 3235 } 3236 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3237 if (m == NULL) { 3238 error = ENOMEM; 3239 goto fail; 3240 } 3241 cmd = mtod(m, struct wpi_tx_cmd *); 3242 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3243 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3244 if (error != 0) { 3245 m_freem(m); 3246 goto fail; 3247 } 3248 data->m = m; 3249 } else { 3250 cmd = &ring->cmd[ring->cur]; 3251 paddr = data->cmd_paddr; 3252 } 3253 3254 cmd->code = code; 3255 cmd->flags = 0; 3256 cmd->qid = ring->qid; 3257 cmd->idx = ring->cur; 3258 memcpy(cmd->data, buf, size); 3259 3260 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3261 desc->segs[0].addr = htole32(paddr); 3262 desc->segs[0].len = htole32(totlen); 3263 3264 if (size > sizeof cmd->data) { 3265 bus_dmamap_sync(ring->data_dmat, data->map, 3266 BUS_DMASYNC_PREWRITE); 3267 } else { 3268 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3269 BUS_DMASYNC_PREWRITE); 3270 } 3271 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3272 BUS_DMASYNC_PREWRITE); 3273 3274 /* Kick command ring. */ 3275 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3276 sc->sc_update_tx_ring(sc, ring); 3277 3278 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3279 3280 WPI_TXQ_UNLOCK(sc); 3281 3282 if (async) 3283 return 0; 3284 3285 return mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3286 3287 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3288 3289 WPI_TXQ_UNLOCK(sc); 3290 3291 return error; 3292 } 3293 3294 /* 3295 * Configure HW multi-rate retries. 3296 */ 3297 static int 3298 wpi_mrr_setup(struct wpi_softc *sc) 3299 { 3300 struct ifnet *ifp = sc->sc_ifp; 3301 struct ieee80211com *ic = ifp->if_l2com; 3302 struct wpi_mrr_setup mrr; 3303 int i, error; 3304 3305 /* CCK rates (not used with 802.11a). */ 3306 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3307 mrr.rates[i].flags = 0; 3308 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3309 /* Fallback to the immediate lower CCK rate (if any.) */ 3310 mrr.rates[i].next = 3311 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3312 /* Try twice at this rate before falling back to "next". */ 3313 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3314 } 3315 /* OFDM rates (not used with 802.11b). */ 3316 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3317 mrr.rates[i].flags = 0; 3318 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3319 /* Fallback to the immediate lower rate (if any.) */ 3320 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3321 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 
3322 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3323 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3324 i - 1; 3325 /* Try twice at this rate before falling back to "next". */ 3326 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3327 } 3328 /* Setup MRR for control frames. */ 3329 mrr.which = htole32(WPI_MRR_CTL); 3330 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3331 if (error != 0) { 3332 device_printf(sc->sc_dev, 3333 "could not setup MRR for control frames\n"); 3334 return error; 3335 } 3336 /* Setup MRR for data frames. */ 3337 mrr.which = htole32(WPI_MRR_DATA); 3338 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3339 if (error != 0) { 3340 device_printf(sc->sc_dev, 3341 "could not setup MRR for data frames\n"); 3342 return error; 3343 } 3344 return 0; 3345 } 3346 3347 static int 3348 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3349 { 3350 struct ieee80211com *ic = ni->ni_ic; 3351 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3352 struct wpi_node *wn = WPI_NODE(ni); 3353 struct wpi_node_info node; 3354 int error; 3355 3356 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3357 3358 if (wn->id == WPI_ID_UNDEFINED) 3359 return EINVAL; 3360 3361 memset(&node, 0, sizeof node); 3362 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3363 node.id = wn->id; 3364 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3365 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3366 node.action = htole32(WPI_ACTION_SET_RATE); 3367 node.antenna = WPI_ANTENNA_BOTH; 3368 3369 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3370 wn->id, ether_sprintf(ni->ni_macaddr)); 3371 3372 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3373 if (error != 0) { 3374 device_printf(sc->sc_dev, 3375 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3376 error); 3377 return error; 3378 } 3379 3380 if (wvp->wv_gtk != 0) { 3381 error = wpi_set_global_keys(ni); 3382 if (error != 0) { 3383 device_printf(sc->sc_dev, 3384 "%s: error while setting global keys\n", __func__); 3385 return ENXIO; 3386 } 3387 } 3388 3389 return 0; 3390 } 3391 3392 /* 3393 * Broadcast node is used to send group-addressed and management frames. 3394 */ 3395 static int 3396 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3397 { 3398 struct ifnet *ifp = sc->sc_ifp; 3399 struct ieee80211com *ic = ifp->if_l2com; 3400 struct wpi_node_info node; 3401 3402 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3403 3404 memset(&node, 0, sizeof node); 3405 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3406 node.id = WPI_ID_BROADCAST; 3407 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3408 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3409 node.action = htole32(WPI_ACTION_SET_RATE); 3410 node.antenna = WPI_ANTENNA_BOTH; 3411 3412 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3413 3414 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3415 } 3416 3417 static int 3418 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3419 { 3420 struct wpi_node *wn = WPI_NODE(ni); 3421 int error; 3422 3423 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3424 3425 wn->id = wpi_add_node_entry_sta(sc); 3426 3427 if ((error = wpi_add_node(sc, ni)) != 0) { 3428 wpi_del_node_entry(sc, wn->id); 3429 wn->id = WPI_ID_UNDEFINED; 3430 return error; 3431 } 3432 3433 return 0; 3434 } 3435 3436 static int 3437 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3438 { 3439 struct wpi_node *wn = WPI_NODE(ni); 3440 int error; 3441 3442 KASSERT(wn->id == WPI_ID_UNDEFINED, 3443 ("the node %d was added before", wn->id)); 3444 3445 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3446 3447 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3448 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3449 return ENOMEM; 3450 } 3451 3452 if ((error = wpi_add_node(sc, ni)) != 0) { 3453 wpi_del_node_entry(sc, wn->id); 3454 wn->id = WPI_ID_UNDEFINED; 3455 return error; 3456 } 3457 3458 return 0; 3459 } 3460 3461 static void 3462 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3463 { 3464 struct wpi_node *wn = WPI_NODE(ni); 3465 struct wpi_cmd_del_node node; 3466 int error; 3467 3468 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3469 3470 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3471 3472 memset(&node, 0, sizeof node); 3473 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3474 node.count = 1; 3475 3476 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3477 wn->id, ether_sprintf(ni->ni_macaddr)); 3478 3479 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3480 if (error != 0) { 3481 device_printf(sc->sc_dev, 3482 "%s: could not delete node %u, error %d\n", __func__, 3483 wn->id, error); 3484 } 3485 } 3486 3487 static int 3488 wpi_updateedca(struct ieee80211com *ic) 3489 { 3490 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3491 struct wpi_softc *sc = ic->ic_ifp->if_softc; 3492 struct wpi_edca_params cmd; 3493 int aci, error; 3494 3495 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3496 3497 memset(&cmd, 0, sizeof cmd); 3498 cmd.flags = htole32(WPI_EDCA_UPDATE); 3499 for (aci = 0; aci < WME_NUM_AC; aci++) { 3500 const struct wmeParams *ac = 3501 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3502 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3503 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3504 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3505 cmd.ac[aci].txoplimit = 3506 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3507 3508 DPRINTF(sc, WPI_DEBUG_EDCA, 3509 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3510 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3511 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3512 cmd.ac[aci].txoplimit); 3513 } 3514 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3515 3516 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3517 3518 return error; 3519 #undef WPI_EXP2 3520 } 3521 3522 static void 3523 wpi_set_promisc(struct wpi_softc *sc) 3524 { 3525 struct ifnet *ifp = sc->sc_ifp; 3526 struct ieee80211com *ic = 
ifp->if_l2com; 3527 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3528 uint32_t promisc_filter; 3529 3530 promisc_filter = WPI_FILTER_CTL; 3531 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3532 promisc_filter |= WPI_FILTER_PROMISC; 3533 3534 if (ifp->if_flags & IFF_PROMISC) 3535 sc->rxon.filter |= htole32(promisc_filter); 3536 else 3537 sc->rxon.filter &= ~htole32(promisc_filter); 3538 } 3539 3540 static void 3541 wpi_update_promisc(struct ifnet *ifp) 3542 { 3543 struct wpi_softc *sc = ifp->if_softc; 3544 3545 WPI_RXON_LOCK(sc); 3546 wpi_set_promisc(sc); 3547 3548 if (wpi_send_rxon(sc, 1, 1) != 0) { 3549 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3550 __func__); 3551 } 3552 WPI_RXON_UNLOCK(sc); 3553 } 3554 3555 static void 3556 wpi_update_mcast(struct ifnet *ifp) 3557 { 3558 /* Ignore */ 3559 } 3560 3561 static void 3562 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3563 { 3564 struct wpi_cmd_led led; 3565 3566 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3567 3568 led.which = which; 3569 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3570 led.off = off; 3571 led.on = on; 3572 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3573 } 3574 3575 static int 3576 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3577 { 3578 struct wpi_cmd_timing cmd; 3579 uint64_t val, mod; 3580 3581 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3582 3583 memset(&cmd, 0, sizeof cmd); 3584 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3585 cmd.bintval = htole16(ni->ni_intval); 3586 cmd.lintval = htole16(10); 3587 3588 /* Compute remaining time until next beacon. */ 3589 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3590 mod = le64toh(cmd.tstamp) % val; 3591 cmd.binitval = htole32((uint32_t)(val - mod)); 3592 3593 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3594 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3595 3596 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3597 } 3598 3599 /* 3600 * This function is called periodically (every 60 seconds) to adjust output 3601 * power to temperature changes. 3602 */ 3603 static void 3604 wpi_power_calibration(struct wpi_softc *sc) 3605 { 3606 int temp; 3607 3608 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3609 3610 /* Update sensor data. */ 3611 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3612 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3613 3614 /* Sanity-check read value. */ 3615 if (temp < -260 || temp > 25) { 3616 /* This can't be correct, ignore. */ 3617 DPRINTF(sc, WPI_DEBUG_TEMP, 3618 "out-of-range temperature reported: %d\n", temp); 3619 return; 3620 } 3621 3622 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3623 3624 /* Adjust Tx power if need be. */ 3625 if (abs(temp - sc->temp) <= 6) 3626 return; 3627 3628 sc->temp = temp; 3629 3630 if (wpi_set_txpower(sc, 1) != 0) { 3631 /* just warn, too bad for the automatic calibration... */ 3632 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3633 } 3634 } 3635 3636 /* 3637 * Set TX power for current channel. 3638 */ 3639 static int 3640 wpi_set_txpower(struct wpi_softc *sc, int async) 3641 { 3642 struct wpi_power_group *group; 3643 struct wpi_cmd_txpower cmd; 3644 uint8_t chan; 3645 int idx, is_chan_5ghz, i; 3646 3647 /* Retrieve current channel from last RXON. 
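	 * The EEPROM provides one TX power group for the 2GHz band
	 * (groups[0]) and several more covering the 5GHz sub-bands; for a
	 * 5GHz channel the first group whose upper channel bound reaches the
	 * current channel is used below.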
*/ 3648 chan = sc->rxon.chan; 3649 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3650 3651 /* Find the TX power group to which this channel belongs. */ 3652 if (is_chan_5ghz) { 3653 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3654 if (chan <= group->chan) 3655 break; 3656 } else 3657 group = &sc->groups[0]; 3658 3659 memset(&cmd, 0, sizeof cmd); 3660 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3661 cmd.chan = htole16(chan); 3662 3663 /* Set TX power for all OFDM and CCK rates. */ 3664 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3665 /* Retrieve TX power for this channel/rate. */ 3666 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3667 3668 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3669 3670 if (is_chan_5ghz) { 3671 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3672 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3673 } else { 3674 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3675 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3676 } 3677 DPRINTF(sc, WPI_DEBUG_TEMP, 3678 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3679 } 3680 3681 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3682 } 3683 3684 /* 3685 * Determine Tx power index for a given channel/rate combination. 3686 * This takes into account the regulatory information from EEPROM and the 3687 * current temperature. 3688 */ 3689 static int 3690 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3691 uint8_t chan, int is_chan_5ghz, int ridx) 3692 { 3693 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3694 #define fdivround(a, b, n) \ 3695 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3696 3697 /* Linear interpolation. */ 3698 #define interpolate(x, x1, y1, x2, y2, n) \ 3699 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3700 3701 struct wpi_power_sample *sample; 3702 int pwr, idx; 3703 3704 /* Default TX power is group maximum TX power minus 3dB. */ 3705 pwr = group->maxpwr / 2; 3706 3707 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3708 switch (ridx) { 3709 case WPI_RIDX_OFDM36: 3710 pwr -= is_chan_5ghz ? 5 : 0; 3711 break; 3712 case WPI_RIDX_OFDM48: 3713 pwr -= is_chan_5ghz ? 10 : 7; 3714 break; 3715 case WPI_RIDX_OFDM54: 3716 pwr -= is_chan_5ghz ? 12 : 9; 3717 break; 3718 } 3719 3720 /* Never exceed the channel maximum allowed TX power. */ 3721 pwr = min(pwr, sc->maxpwr[chan]); 3722 3723 /* Retrieve TX power index into gain tables from samples. */ 3724 for (sample = group->samples; sample < &group->samples[3]; sample++) 3725 if (pwr > sample[1].power) 3726 break; 3727 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3728 idx = interpolate(pwr, sample[0].power, sample[0].index, 3729 sample[1].power, sample[1].index, 19); 3730 3731 /*- 3732 * Adjust power index based on current temperature: 3733 * - if cooler than factory-calibrated: decrease output power 3734 * - if warmer than factory-calibrated: increase output power 3735 */ 3736 idx -= (sc->temp - group->temp) * 11 / 100; 3737 3738 /* Decrease TX power for CCK rates (-5dB). */ 3739 if (ridx >= WPI_RIDX_CCK1) 3740 idx += 10; 3741 3742 /* Make sure idx stays in a valid range. */ 3743 if (idx < 0) 3744 return 0; 3745 if (idx > WPI_MAX_PWR_INDEX) 3746 return WPI_MAX_PWR_INDEX; 3747 return idx; 3748 3749 #undef interpolate 3750 #undef fdivround 3751 } 3752 3753 /* 3754 * Set STA mode power saving level (between 0 and 5). 
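 * The level selects one of the wpi_pmgt parameter sets (RX/TX timeouts and
 * sleep intervals).  As a rough example, dtim = 3 with skip_dtim = 2 lets
 * the ucode sleep for up to 3 * (2 + 1) = 9 beacon intervals when the last
 * interval entry is unlimited.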
3755 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3756 */ 3757 static int 3758 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3759 { 3760 struct wpi_pmgt_cmd cmd; 3761 const struct wpi_pmgt *pmgt; 3762 uint32_t max, skip_dtim; 3763 uint32_t reg; 3764 int i; 3765 3766 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3767 "%s: dtim=%d, level=%d, async=%d\n", 3768 __func__, dtim, level, async); 3769 3770 /* Select which PS parameters to use. */ 3771 if (dtim <= 10) 3772 pmgt = &wpi_pmgt[0][level]; 3773 else 3774 pmgt = &wpi_pmgt[1][level]; 3775 3776 memset(&cmd, 0, sizeof cmd); 3777 WPI_TXQ_LOCK(sc); 3778 if (level != 0) { /* not CAM */ 3779 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3780 sc->sc_flags |= WPI_PS_PATH; 3781 } else 3782 sc->sc_flags &= ~WPI_PS_PATH; 3783 WPI_TXQ_UNLOCK(sc); 3784 /* Retrieve PCIe Active State Power Management (ASPM). */ 3785 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3786 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3787 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3788 3789 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3790 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3791 3792 if (dtim == 0) { 3793 dtim = 1; 3794 skip_dtim = 0; 3795 } else 3796 skip_dtim = pmgt->skip_dtim; 3797 3798 if (skip_dtim != 0) { 3799 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3800 max = pmgt->intval[4]; 3801 if (max == (uint32_t)-1) 3802 max = dtim * (skip_dtim + 1); 3803 else if (max > dtim) 3804 max = (max / dtim) * dtim; 3805 } else 3806 max = dtim; 3807 3808 for (i = 0; i < 5; i++) 3809 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3810 3811 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3812 } 3813 3814 static int 3815 wpi_send_btcoex(struct wpi_softc *sc) 3816 { 3817 struct wpi_bluetooth cmd; 3818 3819 memset(&cmd, 0, sizeof cmd); 3820 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3821 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3822 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3823 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3824 __func__); 3825 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3826 } 3827 3828 static int 3829 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3830 { 3831 int error; 3832 3833 if (async) 3834 WPI_RXON_LOCK_ASSERT(sc); 3835 3836 if (assoc && wpi_check_bss_filter(sc) != 0) { 3837 struct wpi_assoc rxon_assoc; 3838 3839 rxon_assoc.flags = sc->rxon.flags; 3840 rxon_assoc.filter = sc->rxon.filter; 3841 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3842 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3843 rxon_assoc.reserved = 0; 3844 3845 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3846 sizeof (struct wpi_assoc), async); 3847 if (error != 0) { 3848 device_printf(sc->sc_dev, 3849 "RXON_ASSOC command failed, error %d\n", error); 3850 return error; 3851 } 3852 } else { 3853 if (async) { 3854 WPI_NT_LOCK(sc); 3855 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3856 sizeof (struct wpi_rxon), async); 3857 if (error == 0) 3858 wpi_clear_node_table(sc); 3859 WPI_NT_UNLOCK(sc); 3860 } else { 3861 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3862 sizeof (struct wpi_rxon), async); 3863 if (error == 0) 3864 wpi_clear_node_table(sc); 3865 } 3866 3867 if (error != 0) { 3868 device_printf(sc->sc_dev, 3869 "RXON command failed, error %d\n", error); 3870 return error; 3871 } 3872 3873 /* Add broadcast node. 
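		 * After a full RXON the node table starts out empty (the
		 * wpi_clear_node_table() calls above mirror this), so the
		 * WPI_ID_BROADCAST entry has to be re-added before any
		 * group-addressed or management frame can be sent.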
*/
3874 error = wpi_add_broadcast_node(sc, async);
3875 if (error != 0) {
3876 device_printf(sc->sc_dev,
3877 "could not add broadcast node, error %d\n", error);
3878 return error;
3879 }
3880 }
3881
3882 /* Configuration has changed, set Tx power accordingly. */
3883 if ((error = wpi_set_txpower(sc, async)) != 0) {
3884 device_printf(sc->sc_dev,
3885 "%s: could not set TX power, error %d\n", __func__, error);
3886 return error;
3887 }
3888
3889 return 0;
3890 }
3891
3892 /**
3893 * Configure the card to listen to a particular channel; this transitions the
3894 * card into a state where it can receive frames from remote devices.
3895 */
3896 static int
3897 wpi_config(struct wpi_softc *sc)
3898 {
3899 struct ifnet *ifp = sc->sc_ifp;
3900 struct ieee80211com *ic = ifp->if_l2com;
3901 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3902 struct ieee80211_channel *c = ic->ic_curchan;
3903 int error;
3904
3905 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3906
3907 /* Set power saving level to CAM during initialization. */
3908 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
3909 device_printf(sc->sc_dev,
3910 "%s: could not set power saving level\n", __func__);
3911 return error;
3912 }
3913
3914 /* Configure bluetooth coexistence. */
3915 if ((error = wpi_send_btcoex(sc)) != 0) {
3916 device_printf(sc->sc_dev,
3917 "could not configure bluetooth coexistence\n");
3918 return error;
3919 }
3920
3921 /* Configure adapter. */
3922 memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
3923 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
3924
3925 /* Set default channel. */
3926 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
3927 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
3928 if (IEEE80211_IS_CHAN_2GHZ(c))
3929 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
3930
3931 sc->rxon.filter = WPI_FILTER_MULTICAST;
3932 switch (ic->ic_opmode) {
3933 case IEEE80211_M_STA:
3934 sc->rxon.mode = WPI_MODE_STA;
3935 break;
3936 case IEEE80211_M_IBSS:
3937 sc->rxon.mode = WPI_MODE_IBSS;
3938 sc->rxon.filter |= WPI_FILTER_BEACON;
3939 break;
3940 case IEEE80211_M_HOSTAP:
3941 /* XXX workaround for beaconing */
3942 sc->rxon.mode = WPI_MODE_IBSS;
3943 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
3944 break;
3945 case IEEE80211_M_AHDEMO:
3946 sc->rxon.mode = WPI_MODE_HOSTAP;
3947 break;
3948 case IEEE80211_M_MONITOR:
3949 sc->rxon.mode = WPI_MODE_MONITOR;
3950 break;
3951 default:
3952 device_printf(sc->sc_dev, "unknown opmode %d\n",
3953 ic->ic_opmode);
3954 return EINVAL;
3955 }
3956 sc->rxon.filter = htole32(sc->rxon.filter);
3957 wpi_set_promisc(sc);
3958 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
3959 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
3960
3961 /* XXX Current configuration may be unusable. */
3962 if (IEEE80211_IS_CHAN_NOADHOC(c) && sc->rxon.mode == WPI_MODE_IBSS) {
3963 device_printf(sc->sc_dev,
3964 "%s: invalid channel (%d) selected for IBSS mode\n",
3965 __func__, ieee80211_chan2ieee(ic, c));
3966 return EINVAL;
3967 }
3968
3969 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
3970 device_printf(sc->sc_dev, "%s: could not send RXON\n",
3971 __func__);
3972 return error;
3973 }
3974
3975 /* Set up rate scaling.
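* wpi_mrr_setup() programs the firmware's multi-rate retry tables, i.e. the
* fallback rates used when a transmission at the selected rate fails.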
*/ 3976 if ((error = wpi_mrr_setup(sc)) != 0) { 3977 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3978 error); 3979 return error; 3980 } 3981 3982 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3983 3984 return 0; 3985 } 3986 3987 static uint16_t 3988 wpi_get_active_dwell_time(struct wpi_softc *sc, 3989 struct ieee80211_channel *c, uint8_t n_probes) 3990 { 3991 /* No channel? Default to 2GHz settings. */ 3992 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3993 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3994 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3995 } 3996 3997 /* 5GHz dwell time. */ 3998 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 3999 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4000 } 4001 4002 /* 4003 * Limit the total dwell time. 4004 * 4005 * Returns the dwell time in milliseconds. 4006 */ 4007 static uint16_t 4008 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4009 { 4010 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 4011 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4012 int bintval = 0; 4013 4014 /* bintval is in TU (1.024mS) */ 4015 if (vap != NULL) 4016 bintval = vap->iv_bss->ni_intval; 4017 4018 /* 4019 * If it's non-zero, we should calculate the minimum of 4020 * it and the DWELL_BASE. 4021 * 4022 * XXX Yes, the math should take into account that bintval 4023 * is 1.024mS, not 1mS.. 4024 */ 4025 if (bintval > 0) { 4026 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4027 bintval); 4028 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4029 } 4030 4031 /* No association context? Default. */ 4032 return dwell_time; 4033 } 4034 4035 static uint16_t 4036 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4037 { 4038 uint16_t passive; 4039 4040 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4041 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4042 else 4043 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4044 4045 /* Clamp to the beacon interval if we're associated. */ 4046 return (wpi_limit_dwell(sc, passive)); 4047 } 4048 4049 static uint32_t 4050 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4051 { 4052 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4053 uint32_t nbeacons = time / bintval; 4054 4055 if (mod > WPI_PAUSE_MAX_TIME) 4056 mod = WPI_PAUSE_MAX_TIME; 4057 4058 return WPI_PAUSE_SCAN(nbeacons, mod); 4059 } 4060 4061 /* 4062 * Send a scan request to the firmware. 4063 */ 4064 static int 4065 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4066 { 4067 struct ifnet *ifp = sc->sc_ifp; 4068 struct ieee80211com *ic = ifp->if_l2com; 4069 struct ieee80211_scan_state *ss = ic->ic_scan; 4070 struct ieee80211vap *vap = ss->ss_vap; 4071 struct wpi_scan_hdr *hdr; 4072 struct wpi_cmd_data *tx; 4073 struct wpi_scan_essid *essids; 4074 struct wpi_scan_chan *chan; 4075 struct ieee80211_frame *wh; 4076 struct ieee80211_rateset *rs; 4077 uint16_t dwell_active, dwell_passive; 4078 uint8_t *buf, *frm; 4079 int bgscan, bintval, buflen, error, i, nssid; 4080 4081 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4082 4083 /* 4084 * We are absolutely not allowed to send a scan command when another 4085 * scan command is pending. 
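* A pending scan_timeout callout (armed right after a scan command is
* successfully sent, see below) is the indicator that a scan is still in flight.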
4086 */ 4087 if (callout_pending(&sc->scan_timeout)) { 4088 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4089 __func__); 4090 error = EAGAIN; 4091 goto fail; 4092 } 4093 4094 bgscan = wpi_check_bss_filter(sc); 4095 bintval = vap->iv_bss->ni_intval; 4096 if (bgscan != 0 && 4097 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4098 error = EOPNOTSUPP; 4099 goto fail; 4100 } 4101 4102 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4103 if (buf == NULL) { 4104 device_printf(sc->sc_dev, 4105 "%s: could not allocate buffer for scan command\n", 4106 __func__); 4107 error = ENOMEM; 4108 goto fail; 4109 } 4110 hdr = (struct wpi_scan_hdr *)buf; 4111 4112 /* 4113 * Move to the next channel if no packets are received within 10 msecs 4114 * after sending the probe request. 4115 */ 4116 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4117 hdr->quiet_threshold = htole16(1); 4118 4119 if (bgscan != 0) { 4120 /* 4121 * Max needs to be greater than active and passive and quiet! 4122 * It's also in microseconds! 4123 */ 4124 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4125 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4126 bintval)); 4127 } 4128 4129 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4130 4131 tx = (struct wpi_cmd_data *)(hdr + 1); 4132 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4133 tx->id = WPI_ID_BROADCAST; 4134 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4135 4136 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4137 /* Send probe requests at 6Mbps. */ 4138 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4139 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4140 } else { 4141 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4142 /* Send probe requests at 1Mbps. */ 4143 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4144 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4145 } 4146 4147 essids = (struct wpi_scan_essid *)(tx + 1); 4148 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4149 for (i = 0; i < nssid; i++) { 4150 essids[i].id = IEEE80211_ELEMID_SSID; 4151 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4152 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4153 #ifdef WPI_DEBUG 4154 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4155 printf("Scanning Essid: "); 4156 ieee80211_print_essid(essids[i].data, essids[i].len); 4157 printf("\n"); 4158 } 4159 #endif 4160 } 4161 4162 /* 4163 * Build a probe request frame. Most of the following code is a 4164 * copy & paste of what is done in net80211. 4165 */ 4166 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4167 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4168 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4169 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4170 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 4171 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4172 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 4173 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ 4174 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ 4175 4176 frm = (uint8_t *)(wh + 1); 4177 frm = ieee80211_add_ssid(frm, NULL, 0); 4178 frm = ieee80211_add_rates(frm, rs); 4179 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4180 frm = ieee80211_add_xrates(frm, rs); 4181 4182 /* Set length of probe request. */ 4183 tx->len = htole16(frm - (uint8_t *)wh); 4184 4185 /* 4186 * Construct information about the channel that we 4187 * want to scan. 
The firmware expects this to be directly 4188 * after the scan probe request 4189 */ 4190 chan = (struct wpi_scan_chan *)frm; 4191 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4192 chan->flags = 0; 4193 if (nssid) { 4194 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4195 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4196 } else 4197 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4198 4199 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4200 chan->flags |= WPI_CHAN_ACTIVE; 4201 4202 /* 4203 * Calculate the active/passive dwell times. 4204 */ 4205 4206 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4207 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4208 4209 /* Make sure they're valid. */ 4210 if (dwell_active > dwell_passive) 4211 dwell_active = dwell_passive; 4212 4213 chan->active = htole16(dwell_active); 4214 chan->passive = htole16(dwell_passive); 4215 4216 chan->dsp_gain = 0x6e; /* Default level */ 4217 4218 if (IEEE80211_IS_CHAN_5GHZ(c)) 4219 chan->rf_gain = 0x3b; 4220 else 4221 chan->rf_gain = 0x28; 4222 4223 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4224 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4225 4226 hdr->nchan++; 4227 4228 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4229 /* XXX Force probe request transmission. */ 4230 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4231 4232 chan++; 4233 4234 /* Reduce unnecessary delay. */ 4235 chan->flags = 0; 4236 chan->passive = chan->active = hdr->quiet_time; 4237 4238 hdr->nchan++; 4239 } 4240 4241 chan++; 4242 4243 buflen = (uint8_t *)chan - buf; 4244 hdr->len = htole16(buflen); 4245 4246 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4247 hdr->nchan); 4248 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4249 free(buf, M_DEVBUF); 4250 4251 if (error != 0) 4252 goto fail; 4253 4254 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4255 4256 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4257 4258 return 0; 4259 4260 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4261 4262 return error; 4263 } 4264 4265 static int 4266 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4267 { 4268 struct ieee80211com *ic = vap->iv_ic; 4269 struct ieee80211_node *ni = vap->iv_bss; 4270 struct ieee80211_channel *c = ni->ni_chan; 4271 int error; 4272 4273 WPI_RXON_LOCK(sc); 4274 4275 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4276 4277 /* Update adapter configuration. */ 4278 sc->rxon.associd = 0; 4279 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4280 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4281 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4282 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4283 if (IEEE80211_IS_CHAN_2GHZ(c)) 4284 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4285 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4286 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4287 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4288 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4289 if (IEEE80211_IS_CHAN_A(c)) { 4290 sc->rxon.cck_mask = 0; 4291 sc->rxon.ofdm_mask = 0x15; 4292 } else if (IEEE80211_IS_CHAN_B(c)) { 4293 sc->rxon.cck_mask = 0x03; 4294 sc->rxon.ofdm_mask = 0; 4295 } else { 4296 /* Assume 802.11b/g. 
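* cck_mask 0x0f enables all four CCK rates (1/2/5.5/11 Mbps); ofdm_mask 0x15
* presumably selects the mandatory OFDM rates (6/12/24 Mbps).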
*/ 4297 sc->rxon.cck_mask = 0x0f; 4298 sc->rxon.ofdm_mask = 0x15; 4299 } 4300 4301 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4302 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4303 sc->rxon.ofdm_mask); 4304 4305 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4306 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4307 __func__); 4308 } 4309 4310 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4311 4312 WPI_RXON_UNLOCK(sc); 4313 4314 return error; 4315 } 4316 4317 static int 4318 wpi_config_beacon(struct wpi_vap *wvp) 4319 { 4320 struct ieee80211com *ic = wvp->wv_vap.iv_ic; 4321 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4322 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4323 struct wpi_softc *sc = ic->ic_ifp->if_softc; 4324 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4325 struct ieee80211_tim_ie *tie; 4326 struct mbuf *m; 4327 uint8_t *ptr; 4328 int error; 4329 4330 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4331 4332 WPI_VAP_LOCK_ASSERT(wvp); 4333 4334 cmd->len = htole16(bcn->m->m_pkthdr.len); 4335 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4336 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4337 4338 /* XXX seems to be unused */ 4339 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4340 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4341 ptr = mtod(bcn->m, uint8_t *); 4342 4343 cmd->tim = htole16(bo->bo_tim - ptr); 4344 cmd->timsz = tie->tim_len; 4345 } 4346 4347 /* Necessary for recursion in ieee80211_beacon_update(). */ 4348 m = bcn->m; 4349 bcn->m = m_dup(m, M_NOWAIT); 4350 if (bcn->m == NULL) { 4351 device_printf(sc->sc_dev, 4352 "%s: could not copy beacon frame\n", __func__); 4353 error = ENOMEM; 4354 goto end; 4355 } 4356 4357 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4358 device_printf(sc->sc_dev, 4359 "%s: could not update beacon frame, error %d", __func__, 4360 error); 4361 } 4362 4363 /* Restore mbuf. 
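* The duplicate created above was handed to the command path; the original is
* put back so that later ieee80211_beacon_update() calls keep operating on it.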
*/ 4364 end: bcn->m = m; 4365 4366 return error; 4367 } 4368 4369 static int 4370 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4371 { 4372 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 4373 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4374 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4375 struct mbuf *m; 4376 int error; 4377 4378 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4379 4380 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4381 return EINVAL; 4382 4383 m = ieee80211_beacon_alloc(ni, bo); 4384 if (m == NULL) { 4385 device_printf(sc->sc_dev, 4386 "%s: could not allocate beacon frame\n", __func__); 4387 return ENOMEM; 4388 } 4389 4390 WPI_VAP_LOCK(wvp); 4391 if (bcn->m != NULL) 4392 m_freem(bcn->m); 4393 4394 bcn->m = m; 4395 4396 error = wpi_config_beacon(wvp); 4397 WPI_VAP_UNLOCK(wvp); 4398 4399 return error; 4400 } 4401 4402 static void 4403 wpi_update_beacon(struct ieee80211vap *vap, int item) 4404 { 4405 struct wpi_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4406 struct wpi_vap *wvp = WPI_VAP(vap); 4407 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4408 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4409 struct ieee80211_node *ni = vap->iv_bss; 4410 int mcast = 0; 4411 4412 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4413 4414 WPI_VAP_LOCK(wvp); 4415 if (bcn->m == NULL) { 4416 bcn->m = ieee80211_beacon_alloc(ni, bo); 4417 if (bcn->m == NULL) { 4418 device_printf(sc->sc_dev, 4419 "%s: could not allocate beacon frame\n", __func__); 4420 4421 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4422 __func__); 4423 4424 WPI_VAP_UNLOCK(wvp); 4425 return; 4426 } 4427 } 4428 WPI_VAP_UNLOCK(wvp); 4429 4430 if (item == IEEE80211_BEACON_TIM) 4431 mcast = 1; /* TODO */ 4432 4433 setbit(bo->bo_flags, item); 4434 ieee80211_beacon_update(ni, bo, bcn->m, mcast); 4435 4436 WPI_VAP_LOCK(wvp); 4437 wpi_config_beacon(wvp); 4438 WPI_VAP_UNLOCK(wvp); 4439 4440 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4441 } 4442 4443 static void 4444 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4445 { 4446 struct ieee80211vap *vap = ni->ni_vap; 4447 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4448 struct wpi_node *wn = WPI_NODE(ni); 4449 int error; 4450 4451 WPI_NT_LOCK(sc); 4452 4453 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4454 4455 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4456 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4457 device_printf(sc->sc_dev, 4458 "%s: could not add IBSS node, error %d\n", 4459 __func__, error); 4460 } 4461 } 4462 WPI_NT_UNLOCK(sc); 4463 } 4464 4465 static int 4466 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4467 { 4468 struct ieee80211com *ic = vap->iv_ic; 4469 struct ieee80211_node *ni = vap->iv_bss; 4470 struct ieee80211_channel *c = ni->ni_chan; 4471 int error; 4472 4473 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4474 4475 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4476 /* Link LED blinks while monitoring. */ 4477 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4478 return 0; 4479 } 4480 4481 /* XXX kernel panic workaround */ 4482 if (c == IEEE80211_CHAN_ANYC) { 4483 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4484 __func__); 4485 return EINVAL; 4486 } 4487 4488 if ((error = wpi_set_timing(sc, ni)) != 0) { 4489 device_printf(sc->sc_dev, 4490 "%s: could not set timing, error %d\n", __func__, error); 4491 return error; 4492 } 4493 4494 /* Update adapter configuration. 
*/ 4495 WPI_RXON_LOCK(sc); 4496 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4497 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4498 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4499 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4500 if (IEEE80211_IS_CHAN_2GHZ(c)) 4501 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4502 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4503 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4504 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4505 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4506 if (IEEE80211_IS_CHAN_A(c)) { 4507 sc->rxon.cck_mask = 0; 4508 sc->rxon.ofdm_mask = 0x15; 4509 } else if (IEEE80211_IS_CHAN_B(c)) { 4510 sc->rxon.cck_mask = 0x03; 4511 sc->rxon.ofdm_mask = 0; 4512 } else { 4513 /* Assume 802.11b/g. */ 4514 sc->rxon.cck_mask = 0x0f; 4515 sc->rxon.ofdm_mask = 0x15; 4516 } 4517 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4518 4519 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4520 sc->rxon.chan, sc->rxon.flags); 4521 4522 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4523 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4524 __func__); 4525 return error; 4526 } 4527 4528 /* Start periodic calibration timer. */ 4529 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4530 4531 WPI_RXON_UNLOCK(sc); 4532 4533 if (vap->iv_opmode == IEEE80211_M_IBSS || 4534 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4535 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4536 device_printf(sc->sc_dev, 4537 "%s: could not setup beacon, error %d\n", __func__, 4538 error); 4539 return error; 4540 } 4541 } 4542 4543 if (vap->iv_opmode == IEEE80211_M_STA) { 4544 /* Add BSS node. */ 4545 WPI_NT_LOCK(sc); 4546 error = wpi_add_sta_node(sc, ni); 4547 WPI_NT_UNLOCK(sc); 4548 if (error != 0) { 4549 device_printf(sc->sc_dev, 4550 "%s: could not add BSS node, error %d\n", __func__, 4551 error); 4552 return error; 4553 } 4554 } 4555 4556 /* Link LED always on while associated. */ 4557 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4558 4559 /* Enable power-saving mode if requested by user. 
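* Level 3 (out of 0..5) is used as the default; IBSS vaps are excluded,
* presumably because the interface has to stay awake to send beacons.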
*/ 4560 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4561 vap->iv_opmode != IEEE80211_M_IBSS) 4562 (void)wpi_set_pslevel(sc, 0, 3, 1); 4563 4564 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4565 4566 return 0; 4567 } 4568 4569 static int 4570 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4571 { 4572 const struct ieee80211_cipher *cip = k->wk_cipher; 4573 struct ieee80211vap *vap = ni->ni_vap; 4574 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4575 struct wpi_node *wn = WPI_NODE(ni); 4576 struct wpi_node_info node; 4577 uint16_t kflags; 4578 int error; 4579 4580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4581 4582 if (wpi_check_node_entry(sc, wn->id) == 0) { 4583 device_printf(sc->sc_dev, "%s: node does not exist\n", 4584 __func__); 4585 return 0; 4586 } 4587 4588 switch (cip->ic_cipher) { 4589 case IEEE80211_CIPHER_AES_CCM: 4590 kflags = WPI_KFLAG_CCMP; 4591 break; 4592 4593 default: 4594 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4595 cip->ic_cipher); 4596 return 0; 4597 } 4598 4599 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4600 if (k->wk_flags & IEEE80211_KEY_GROUP) 4601 kflags |= WPI_KFLAG_MULTICAST; 4602 4603 memset(&node, 0, sizeof node); 4604 node.id = wn->id; 4605 node.control = WPI_NODE_UPDATE; 4606 node.flags = WPI_FLAG_KEY_SET; 4607 node.kflags = htole16(kflags); 4608 memcpy(node.key, k->wk_key, k->wk_keylen); 4609 again: 4610 DPRINTF(sc, WPI_DEBUG_KEY, 4611 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4612 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4613 node.id, ether_sprintf(ni->ni_macaddr)); 4614 4615 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4616 if (error != 0) { 4617 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4618 error); 4619 return !error; 4620 } 4621 4622 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4623 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4624 kflags |= WPI_KFLAG_MULTICAST; 4625 node.kflags = htole16(kflags); 4626 4627 goto again; 4628 } 4629 4630 return 1; 4631 } 4632 4633 static void 4634 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4635 { 4636 const struct ieee80211_key *k = arg; 4637 struct ieee80211vap *vap = ni->ni_vap; 4638 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4639 struct wpi_node *wn = WPI_NODE(ni); 4640 int error; 4641 4642 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4643 return; 4644 4645 WPI_NT_LOCK(sc); 4646 error = wpi_load_key(ni, k); 4647 WPI_NT_UNLOCK(sc); 4648 4649 if (error == 0) { 4650 device_printf(sc->sc_dev, "%s: error while setting key\n", 4651 __func__); 4652 } 4653 } 4654 4655 static int 4656 wpi_set_global_keys(struct ieee80211_node *ni) 4657 { 4658 struct ieee80211vap *vap = ni->ni_vap; 4659 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4660 int error = 1; 4661 4662 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4663 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4664 error = wpi_load_key(ni, wk); 4665 4666 return !error; 4667 } 4668 4669 static int 4670 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4671 { 4672 struct ieee80211vap *vap = ni->ni_vap; 4673 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4674 struct wpi_node *wn = WPI_NODE(ni); 4675 struct wpi_node_info node; 4676 uint16_t kflags; 4677 int error; 4678 4679 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4680 4681 if (wpi_check_node_entry(sc, wn->id) == 0) { 4682 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 
4683 return 1; /* Nothing to do. */ 4684 } 4685 4686 kflags = WPI_KFLAG_KID(k->wk_keyix); 4687 if (k->wk_flags & IEEE80211_KEY_GROUP) 4688 kflags |= WPI_KFLAG_MULTICAST; 4689 4690 memset(&node, 0, sizeof node); 4691 node.id = wn->id; 4692 node.control = WPI_NODE_UPDATE; 4693 node.flags = WPI_FLAG_KEY_SET; 4694 node.kflags = htole16(kflags); 4695 again: 4696 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4697 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4698 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4699 4700 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4701 if (error != 0) { 4702 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4703 error); 4704 return !error; 4705 } 4706 4707 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4708 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4709 kflags |= WPI_KFLAG_MULTICAST; 4710 node.kflags = htole16(kflags); 4711 4712 goto again; 4713 } 4714 4715 return 1; 4716 } 4717 4718 static void 4719 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4720 { 4721 const struct ieee80211_key *k = arg; 4722 struct ieee80211vap *vap = ni->ni_vap; 4723 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4724 struct wpi_node *wn = WPI_NODE(ni); 4725 int error; 4726 4727 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4728 return; 4729 4730 WPI_NT_LOCK(sc); 4731 error = wpi_del_key(ni, k); 4732 WPI_NT_UNLOCK(sc); 4733 4734 if (error == 0) { 4735 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4736 __func__); 4737 } 4738 } 4739 4740 static int 4741 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4742 int set) 4743 { 4744 struct ieee80211com *ic = vap->iv_ic; 4745 struct wpi_softc *sc = ic->ic_ifp->if_softc; 4746 struct wpi_vap *wvp = WPI_VAP(vap); 4747 struct ieee80211_node *ni; 4748 int error, ni_ref = 0; 4749 4750 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4751 4752 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4753 /* Not for us. */ 4754 return 1; 4755 } 4756 4757 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4758 /* XMIT keys are handled in wpi_tx_data(). */ 4759 return 1; 4760 } 4761 4762 /* Handle group keys. */ 4763 if (&vap->iv_nw_keys[0] <= k && 4764 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4765 WPI_NT_LOCK(sc); 4766 if (set) 4767 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4768 else 4769 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4770 WPI_NT_UNLOCK(sc); 4771 4772 if (vap->iv_state == IEEE80211_S_RUN) { 4773 ieee80211_iterate_nodes(&ic->ic_sta, 4774 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4775 __DECONST(void *, k)); 4776 } 4777 4778 return 1; 4779 } 4780 4781 switch (vap->iv_opmode) { 4782 case IEEE80211_M_STA: 4783 ni = vap->iv_bss; 4784 break; 4785 4786 case IEEE80211_M_IBSS: 4787 case IEEE80211_M_AHDEMO: 4788 case IEEE80211_M_HOSTAP: 4789 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4790 if (ni == NULL) 4791 return 0; /* should not happen */ 4792 4793 ni_ref = 1; 4794 break; 4795 4796 default: 4797 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4798 vap->iv_opmode); 4799 return 0; 4800 } 4801 4802 WPI_NT_LOCK(sc); 4803 if (set) 4804 error = wpi_load_key(ni, k); 4805 else 4806 error = wpi_del_key(ni, k); 4807 WPI_NT_UNLOCK(sc); 4808 4809 if (ni_ref) 4810 ieee80211_node_decref(ni); 4811 4812 return error; 4813 } 4814 4815 static int 4816 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, 4817 const uint8_t mac[IEEE80211_ADDR_LEN]) 4818 { 4819 return wpi_process_key(vap, k, 1); 4820 } 4821 4822 static int 4823 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4824 { 4825 return wpi_process_key(vap, k, 0); 4826 } 4827 4828 /* 4829 * This function is called after the runtime firmware notifies us of its 4830 * readiness (called in a process context). 4831 */ 4832 static int 4833 wpi_post_alive(struct wpi_softc *sc) 4834 { 4835 int ntries, error; 4836 4837 /* Check (again) that the radio is not disabled. */ 4838 if ((error = wpi_nic_lock(sc)) != 0) 4839 return error; 4840 4841 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4842 4843 /* NB: Runtime firmware must be up and running. */ 4844 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4845 device_printf(sc->sc_dev, 4846 "RF switch: radio disabled (%s)\n", __func__); 4847 wpi_nic_unlock(sc); 4848 return EPERM; /* :-) */ 4849 } 4850 wpi_nic_unlock(sc); 4851 4852 /* Wait for thermal sensor to calibrate. */ 4853 for (ntries = 0; ntries < 1000; ntries++) { 4854 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4855 break; 4856 DELAY(10); 4857 } 4858 4859 if (ntries == 1000) { 4860 device_printf(sc->sc_dev, 4861 "timeout waiting for thermal sensor calibration\n"); 4862 return ETIMEDOUT; 4863 } 4864 4865 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4866 return 0; 4867 } 4868 4869 /* 4870 * The firmware boot code is small and is intended to be copied directly into 4871 * the NIC internal memory (no DMA transfer). 4872 */ 4873 static int 4874 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size) 4875 { 4876 int error, ntries; 4877 4878 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4879 4880 size /= sizeof (uint32_t); 4881 4882 if ((error = wpi_nic_lock(sc)) != 0) 4883 return error; 4884 4885 /* Copy microcode image into NIC memory. */ 4886 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4887 (const uint32_t *)ucode, size); 4888 4889 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4890 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4891 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4892 4893 /* Start boot load now. */ 4894 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4895 4896 /* Wait for transfer to complete. 
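* Completion is detected by polling the FH TX status register for the idle bit
* of channel 6 (apparently the DMA channel used by the BSM), for up to 10ms.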
*/ 4897 for (ntries = 0; ntries < 1000; ntries++) { 4898 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4899 DPRINTF(sc, WPI_DEBUG_HW, 4900 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4901 WPI_FH_TX_STATUS_IDLE(6), 4902 status & WPI_FH_TX_STATUS_IDLE(6)); 4903 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4904 DPRINTF(sc, WPI_DEBUG_HW, 4905 "Status Match! - ntries = %d\n", ntries); 4906 break; 4907 } 4908 DELAY(10); 4909 } 4910 if (ntries == 1000) { 4911 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4912 __func__); 4913 wpi_nic_unlock(sc); 4914 return ETIMEDOUT; 4915 } 4916 4917 /* Enable boot after power up. */ 4918 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4919 4920 wpi_nic_unlock(sc); 4921 return 0; 4922 } 4923 4924 static int 4925 wpi_load_firmware(struct wpi_softc *sc) 4926 { 4927 struct wpi_fw_info *fw = &sc->fw; 4928 struct wpi_dma_info *dma = &sc->fw_dma; 4929 int error; 4930 4931 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4932 4933 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4934 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4935 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4936 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4937 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4938 4939 /* Tell adapter where to find initialization sections. */ 4940 if ((error = wpi_nic_lock(sc)) != 0) 4941 return error; 4942 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4943 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4944 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4945 dma->paddr + WPI_FW_DATA_MAXSZ); 4946 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4947 wpi_nic_unlock(sc); 4948 4949 /* Load firmware boot code. */ 4950 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4951 if (error != 0) { 4952 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4953 __func__); 4954 return error; 4955 } 4956 4957 /* Now press "execute". */ 4958 WPI_WRITE(sc, WPI_RESET, 0); 4959 4960 /* Wait at most one second for first alive notification. */ 4961 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4962 device_printf(sc->sc_dev, 4963 "%s: timeout waiting for adapter to initialize, error %d\n", 4964 __func__, error); 4965 return error; 4966 } 4967 4968 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4969 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4970 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4971 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4972 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4973 4974 /* Tell adapter where to find runtime sections. 
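* The same BSM DRAM pointers are reused; the runtime text size is OR'ed with
* WPI_FW_UPDATED, presumably to tell the BSM that a new image has been staged.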
*/ 4975 if ((error = wpi_nic_lock(sc)) != 0) 4976 return error; 4977 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4978 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4979 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4980 dma->paddr + WPI_FW_DATA_MAXSZ); 4981 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4982 WPI_FW_UPDATED | fw->main.textsz); 4983 wpi_nic_unlock(sc); 4984 4985 return 0; 4986 } 4987 4988 static int 4989 wpi_read_firmware(struct wpi_softc *sc) 4990 { 4991 const struct firmware *fp; 4992 struct wpi_fw_info *fw = &sc->fw; 4993 const struct wpi_firmware_hdr *hdr; 4994 int error; 4995 4996 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4997 4998 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4999 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5000 5001 WPI_UNLOCK(sc); 5002 fp = firmware_get(WPI_FW_NAME); 5003 WPI_LOCK(sc); 5004 5005 if (fp == NULL) { 5006 device_printf(sc->sc_dev, 5007 "could not load firmware image '%s'\n", WPI_FW_NAME); 5008 return EINVAL; 5009 } 5010 5011 sc->fw_fp = fp; 5012 5013 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5014 device_printf(sc->sc_dev, 5015 "firmware file too short: %zu bytes\n", fp->datasize); 5016 error = EINVAL; 5017 goto fail; 5018 } 5019 5020 fw->size = fp->datasize; 5021 fw->data = (const uint8_t *)fp->data; 5022 5023 /* Extract firmware header information. */ 5024 hdr = (const struct wpi_firmware_hdr *)fw->data; 5025 5026 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5027 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5028 5029 fw->main.textsz = le32toh(hdr->rtextsz); 5030 fw->main.datasz = le32toh(hdr->rdatasz); 5031 fw->init.textsz = le32toh(hdr->itextsz); 5032 fw->init.datasz = le32toh(hdr->idatasz); 5033 fw->boot.textsz = le32toh(hdr->btextsz); 5034 fw->boot.datasz = 0; 5035 5036 /* Sanity-check firmware header. */ 5037 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5038 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5039 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5040 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5041 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5042 (fw->boot.textsz & 3) != 0) { 5043 device_printf(sc->sc_dev, "invalid firmware header\n"); 5044 error = EINVAL; 5045 goto fail; 5046 } 5047 5048 /* Check that all firmware sections fit. */ 5049 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5050 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5051 device_printf(sc->sc_dev, 5052 "firmware file too short: %zu bytes\n", fw->size); 5053 error = EINVAL; 5054 goto fail; 5055 } 5056 5057 /* Get pointers to firmware sections. 
*/ 5058 fw->main.text = (const uint8_t *)(hdr + 1); 5059 fw->main.data = fw->main.text + fw->main.textsz; 5060 fw->init.text = fw->main.data + fw->main.datasz; 5061 fw->init.data = fw->init.text + fw->init.textsz; 5062 fw->boot.text = fw->init.data + fw->init.datasz; 5063 5064 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5065 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5066 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5067 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5068 fw->main.textsz, fw->main.datasz, 5069 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5070 5071 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5072 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5073 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5074 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5075 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5076 5077 return 0; 5078 5079 fail: wpi_unload_firmware(sc); 5080 return error; 5081 } 5082 5083 /** 5084 * Free the referenced firmware image 5085 */ 5086 static void 5087 wpi_unload_firmware(struct wpi_softc *sc) 5088 { 5089 if (sc->fw_fp != NULL) { 5090 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5091 sc->fw_fp = NULL; 5092 } 5093 } 5094 5095 static int 5096 wpi_clock_wait(struct wpi_softc *sc) 5097 { 5098 int ntries; 5099 5100 /* Set "initialization complete" bit. */ 5101 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5102 5103 /* Wait for clock stabilization. */ 5104 for (ntries = 0; ntries < 2500; ntries++) { 5105 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5106 return 0; 5107 DELAY(100); 5108 } 5109 device_printf(sc->sc_dev, 5110 "%s: timeout waiting for clock stabilization\n", __func__); 5111 5112 return ETIMEDOUT; 5113 } 5114 5115 static int 5116 wpi_apm_init(struct wpi_softc *sc) 5117 { 5118 uint32_t reg; 5119 int error; 5120 5121 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5122 5123 /* Disable L0s exit timer (NMI bug workaround). */ 5124 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5125 /* Don't wait for ICH L0s (ICH bug workaround). */ 5126 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5127 5128 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5129 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5130 5131 /* Retrieve PCIe Active State Power Management (ASPM). */ 5132 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5133 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5134 if (reg & 0x02) /* L1 Entry enabled. */ 5135 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5136 else 5137 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5138 5139 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5140 5141 /* Wait for clock stabilization before accessing prph. */ 5142 if ((error = wpi_clock_wait(sc)) != 0) 5143 return error; 5144 5145 if ((error = wpi_nic_lock(sc)) != 0) 5146 return error; 5147 /* Cleanup. */ 5148 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5149 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5150 5151 /* Enable DMA and BSM (Bootstrap State Machine). */ 5152 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5153 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5154 DELAY(20); 5155 /* Disable L1-Active. 
*/ 5156 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5157 wpi_nic_unlock(sc); 5158 5159 return 0; 5160 } 5161 5162 static void 5163 wpi_apm_stop_master(struct wpi_softc *sc) 5164 { 5165 int ntries; 5166 5167 /* Stop busmaster DMA activity. */ 5168 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5169 5170 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5171 WPI_GP_CNTRL_MAC_PS) 5172 return; /* Already asleep. */ 5173 5174 for (ntries = 0; ntries < 100; ntries++) { 5175 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5176 return; 5177 DELAY(10); 5178 } 5179 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5180 __func__); 5181 } 5182 5183 static void 5184 wpi_apm_stop(struct wpi_softc *sc) 5185 { 5186 wpi_apm_stop_master(sc); 5187 5188 /* Reset the entire device. */ 5189 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5190 DELAY(10); 5191 /* Clear "initialization complete" bit. */ 5192 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5193 } 5194 5195 static void 5196 wpi_nic_config(struct wpi_softc *sc) 5197 { 5198 uint32_t rev; 5199 5200 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5201 5202 /* voodoo from the Linux "driver".. */ 5203 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5204 if ((rev & 0xc0) == 0x40) 5205 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5206 else if (!(rev & 0x80)) 5207 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5208 5209 if (sc->cap == 0x80) 5210 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5211 5212 if ((sc->rev & 0xf0) == 0xd0) 5213 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5214 else 5215 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5216 5217 if (sc->type > 1) 5218 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5219 } 5220 5221 static int 5222 wpi_hw_init(struct wpi_softc *sc) 5223 { 5224 int chnl, ntries, error; 5225 5226 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5227 5228 /* Clear pending interrupts. */ 5229 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5230 5231 if ((error = wpi_apm_init(sc)) != 0) { 5232 device_printf(sc->sc_dev, 5233 "%s: could not power ON adapter, error %d\n", __func__, 5234 error); 5235 return error; 5236 } 5237 5238 /* Select VMAIN power source. */ 5239 if ((error = wpi_nic_lock(sc)) != 0) 5240 return error; 5241 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5242 wpi_nic_unlock(sc); 5243 /* Spin until VMAIN gets selected. */ 5244 for (ntries = 0; ntries < 5000; ntries++) { 5245 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5246 break; 5247 DELAY(10); 5248 } 5249 if (ntries == 5000) { 5250 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5251 return ETIMEDOUT; 5252 } 5253 5254 /* Perform adapter initialization. */ 5255 wpi_nic_config(sc); 5256 5257 /* Initialize RX ring. */ 5258 if ((error = wpi_nic_lock(sc)) != 0) 5259 return error; 5260 /* Set physical address of RX ring. */ 5261 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5262 /* Set physical address of RX read pointer. */ 5263 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5264 offsetof(struct wpi_shared, next)); 5265 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5266 /* Enable RX. 
*/ 5267 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5268 WPI_FH_RX_CONFIG_DMA_ENA | 5269 WPI_FH_RX_CONFIG_RDRBD_ENA | 5270 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5271 WPI_FH_RX_CONFIG_MAXFRAG | 5272 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5273 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5274 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5275 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5276 wpi_nic_unlock(sc); 5277 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5278 5279 /* Initialize TX rings. */ 5280 if ((error = wpi_nic_lock(sc)) != 0) 5281 return error; 5282 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5283 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5284 /* Enable all 6 TX rings. */ 5285 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5286 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5287 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5288 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5289 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5290 /* Set physical address of TX rings. */ 5291 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5292 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5293 5294 /* Enable all DMA channels. */ 5295 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5296 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5297 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5298 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5299 } 5300 wpi_nic_unlock(sc); 5301 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5302 5303 /* Clear "radio off" and "commands blocked" bits. */ 5304 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5305 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5306 5307 /* Clear pending interrupts. */ 5308 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5309 /* Enable interrupts. */ 5310 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5311 5312 /* _Really_ make sure "radio off" bit is cleared! */ 5313 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5314 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5315 5316 if ((error = wpi_load_firmware(sc)) != 0) { 5317 device_printf(sc->sc_dev, 5318 "%s: could not load firmware, error %d\n", __func__, 5319 error); 5320 return error; 5321 } 5322 /* Wait at most one second for firmware alive notification. */ 5323 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5324 device_printf(sc->sc_dev, 5325 "%s: timeout waiting for adapter to initialize, error %d\n", 5326 __func__, error); 5327 return error; 5328 } 5329 5330 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5331 5332 /* Do post-firmware initialization. */ 5333 return wpi_post_alive(sc); 5334 } 5335 5336 static void 5337 wpi_hw_stop(struct wpi_softc *sc) 5338 { 5339 int chnl, qid, ntries; 5340 5341 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5342 5343 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5344 wpi_nic_lock(sc); 5345 5346 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5347 5348 /* Disable interrupts. */ 5349 WPI_WRITE(sc, WPI_INT_MASK, 0); 5350 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5351 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5352 5353 /* Make sure we no longer hold the NIC lock. */ 5354 wpi_nic_unlock(sc); 5355 5356 if (wpi_nic_lock(sc) == 0) { 5357 /* Stop TX scheduler. */ 5358 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5359 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5360 5361 /* Stop all DMA channels. 
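* Each channel is given up to 2ms (200 x 10us) to reach its idle state.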
*/ 5362 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5363 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5364 for (ntries = 0; ntries < 200; ntries++) { 5365 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5366 WPI_FH_TX_STATUS_IDLE(chnl)) 5367 break; 5368 DELAY(10); 5369 } 5370 } 5371 wpi_nic_unlock(sc); 5372 } 5373 5374 /* Stop RX ring. */ 5375 wpi_reset_rx_ring(sc); 5376 5377 /* Reset all TX rings. */ 5378 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 5379 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5380 5381 if (wpi_nic_lock(sc) == 0) { 5382 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5383 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5384 wpi_nic_unlock(sc); 5385 } 5386 DELAY(5); 5387 /* Power OFF adapter. */ 5388 wpi_apm_stop(sc); 5389 } 5390 5391 static void 5392 wpi_radio_on(void *arg0, int pending) 5393 { 5394 struct wpi_softc *sc = arg0; 5395 struct ifnet *ifp = sc->sc_ifp; 5396 struct ieee80211com *ic = ifp->if_l2com; 5397 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5398 5399 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5400 5401 if (vap != NULL) { 5402 wpi_init(sc); 5403 ieee80211_init(vap); 5404 } 5405 5406 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL) { 5407 WPI_LOCK(sc); 5408 callout_stop(&sc->watchdog_rfkill); 5409 WPI_UNLOCK(sc); 5410 } 5411 } 5412 5413 static void 5414 wpi_radio_off(void *arg0, int pending) 5415 { 5416 struct wpi_softc *sc = arg0; 5417 struct ifnet *ifp = sc->sc_ifp; 5418 struct ieee80211com *ic = ifp->if_l2com; 5419 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5420 5421 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5422 5423 wpi_stop(sc); 5424 if (vap != NULL) 5425 ieee80211_stop(vap); 5426 5427 WPI_LOCK(sc); 5428 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5429 WPI_UNLOCK(sc); 5430 } 5431 5432 static void 5433 wpi_init(void *arg) 5434 { 5435 struct wpi_softc *sc = arg; 5436 struct ifnet *ifp = sc->sc_ifp; 5437 struct ieee80211com *ic = ifp->if_l2com; 5438 int error; 5439 5440 WPI_LOCK(sc); 5441 5442 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5443 5444 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 5445 goto end; 5446 5447 /* Check that the radio is not disabled by hardware switch. */ 5448 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5449 device_printf(sc->sc_dev, 5450 "RF switch: radio disabled (%s)\n", __func__); 5451 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5452 sc); 5453 goto end; 5454 } 5455 5456 /* Read firmware images from the filesystem. */ 5457 if ((error = wpi_read_firmware(sc)) != 0) { 5458 device_printf(sc->sc_dev, 5459 "%s: could not read firmware, error %d\n", __func__, 5460 error); 5461 goto fail; 5462 } 5463 5464 /* Initialize hardware and upload firmware. */ 5465 error = wpi_hw_init(sc); 5466 wpi_unload_firmware(sc); 5467 if (error != 0) { 5468 device_printf(sc->sc_dev, 5469 "%s: could not initialize hardware, error %d\n", __func__, 5470 error); 5471 goto fail; 5472 } 5473 5474 /* Configure adapter now that it is ready. 
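* wpi_config() pushes the initial power-save level (CAM), the bluetooth
* coexistence settings, the RXON configuration (which also triggers TX power
* setup) and the MRR tables to the firmware.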
*/ 5475 sc->txq_active = 1; 5476 if ((error = wpi_config(sc)) != 0) { 5477 device_printf(sc->sc_dev, 5478 "%s: could not configure device, error %d\n", __func__, 5479 error); 5480 goto fail; 5481 } 5482 5483 IF_LOCK(&ifp->if_snd); 5484 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5485 ifp->if_drv_flags |= IFF_DRV_RUNNING; 5486 IF_UNLOCK(&ifp->if_snd); 5487 5488 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5489 5490 WPI_UNLOCK(sc); 5491 5492 ieee80211_start_all(ic); 5493 5494 return; 5495 5496 fail: wpi_stop_locked(sc); 5497 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5498 WPI_UNLOCK(sc); 5499 } 5500 5501 static void 5502 wpi_stop_locked(struct wpi_softc *sc) 5503 { 5504 struct ifnet *ifp = sc->sc_ifp; 5505 5506 WPI_LOCK_ASSERT(sc); 5507 5508 WPI_TXQ_LOCK(sc); 5509 sc->txq_active = 0; 5510 WPI_TXQ_UNLOCK(sc); 5511 5512 WPI_TXQ_STATE_LOCK(sc); 5513 callout_stop(&sc->tx_timeout); 5514 WPI_TXQ_STATE_UNLOCK(sc); 5515 5516 WPI_RXON_LOCK(sc); 5517 callout_stop(&sc->scan_timeout); 5518 callout_stop(&sc->calib_to); 5519 WPI_RXON_UNLOCK(sc); 5520 5521 IF_LOCK(&ifp->if_snd); 5522 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 5523 IF_UNLOCK(&ifp->if_snd); 5524 5525 /* Power OFF hardware. */ 5526 wpi_hw_stop(sc); 5527 } 5528 5529 static void 5530 wpi_stop(struct wpi_softc *sc) 5531 { 5532 WPI_LOCK(sc); 5533 wpi_stop_locked(sc); 5534 WPI_UNLOCK(sc); 5535 } 5536 5537 /* 5538 * Callback from net80211 to start a scan. 5539 */ 5540 static void 5541 wpi_scan_start(struct ieee80211com *ic) 5542 { 5543 struct wpi_softc *sc = ic->ic_ifp->if_softc; 5544 5545 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5546 } 5547 5548 /* 5549 * Callback from net80211 to terminate a scan. 5550 */ 5551 static void 5552 wpi_scan_end(struct ieee80211com *ic) 5553 { 5554 struct ifnet *ifp = ic->ic_ifp; 5555 struct wpi_softc *sc = ifp->if_softc; 5556 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5557 5558 if (vap->iv_state == IEEE80211_S_RUN) 5559 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5560 } 5561 5562 /** 5563 * Called by the net80211 framework to indicate to the driver 5564 * that the channel should be changed 5565 */ 5566 static void 5567 wpi_set_channel(struct ieee80211com *ic) 5568 { 5569 const struct ieee80211_channel *c = ic->ic_curchan; 5570 struct ifnet *ifp = ic->ic_ifp; 5571 struct wpi_softc *sc = ifp->if_softc; 5572 int error; 5573 5574 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5575 5576 WPI_LOCK(sc); 5577 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5578 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5579 WPI_UNLOCK(sc); 5580 WPI_TX_LOCK(sc); 5581 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5582 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5583 WPI_TX_UNLOCK(sc); 5584 5585 /* 5586 * Only need to set the channel in Monitor mode. AP scanning and auth 5587 * are already taken care of by their respective firmware commands. 5588 */ 5589 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5590 WPI_RXON_LOCK(sc); 5591 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5592 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5593 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5594 WPI_RXON_24GHZ); 5595 } else { 5596 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5597 WPI_RXON_24GHZ); 5598 } 5599 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5600 device_printf(sc->sc_dev, 5601 "%s: error %d setting channel\n", __func__, 5602 error); 5603 WPI_RXON_UNLOCK(sc); 5604 } 5605 } 5606 5607 /** 5608 * Called by net80211 to indicate that we need to scan the current 5609 * channel. 
The channel will have previously been set via the wpi_set_channel
5610 * callback.
5611 */
5612 static void
5613 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5614 {
5615 struct ieee80211vap *vap = ss->ss_vap;
5616 struct ieee80211com *ic = vap->iv_ic;
5617 struct wpi_softc *sc = ic->ic_ifp->if_softc;
5618 int error;
5619
5620 WPI_RXON_LOCK(sc);
5621 error = wpi_scan(sc, ic->ic_curchan);
5622 WPI_RXON_UNLOCK(sc);
5623 if (error != 0)
5624 ieee80211_cancel_scan(vap);
5625 }
5626
5627 /**
5628 * Called by the net80211 framework to indicate that
5629 * the minimum dwell time has been met and the scan should be terminated.
5630 * We don't actually terminate the scan, as the firmware will notify
5631 * us when it's finished and we have no way to interrupt it.
5632 */
5633 static void
5634 wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5635 {
5636 /* NB: don't try to abort scan; wait for firmware to finish */
5637 }
5638
5639 static void
5640 wpi_hw_reset(void *arg, int pending)
5641 {
5642 struct wpi_softc *sc = arg;
5643 struct ifnet *ifp = sc->sc_ifp;
5644 struct ieee80211com *ic = ifp->if_l2com;
5645 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5646
5647 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5648
5649 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN))
5650 ieee80211_cancel_scan(vap);
5651
5652 wpi_stop(sc);
5653 if (vap != NULL)
5654 ieee80211_stop(vap);
5655 wpi_init(sc);
5656 if (vap != NULL)
5657 ieee80211_init(vap);
5658 }
5659