/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter is not driven through traditional hardware
 * registers the way many other adapters are. Instead, at run time the
 * adapter is put into a known state and told to load the boot firmware.
 * The boot firmware then loads the init and main binary firmware images
 * into SRAM on the card via DMA. Once the firmware is loaded, the driver
 * and the firmware communicate by way of circular DMA rings located in
 * SRAM.
 *
 * There are six memory rings: one command ring, one RX data ring and four
 * TX data rings. The four TX data rings allow for QoS prioritization.
 *
 * The RX data ring consists of 32 DMA buffers. Two registers are used to
 * indicate how far into the ring the driver and the firmware have
 * progressed. The driver sets the initial read index (reg1) and the
 * initial write index (reg2); the firmware updates the read index (reg1)
 * on reception of a packet and fires an interrupt. The driver then
 * processes the buffers starting at reg1, indicating to the firmware
 * which buffers have been consumed by updating reg2, and at the same
 * time allocates new memory for each processed buffer.
 *
 * A similar scheme is used for the TX rings. The difference is that the
 * firmware stops processing buffers once a queue is full and does not
 * resume until confirmation of a successful transmission (tx_done) has
 * occurred.
 *
 * The command ring operates in the same manner as the TX queues.
 *
 * All communication directly with the card (e.g. EEPROM access) is
 * classed as Stage 1 communication.
 *
 * All communication with the card via the firmware is classed as Stage 2.
 * The firmware consists of two parts: a bootstrap firmware and a runtime
 * firmware. Both are loaded from host memory to the card via DMA and then
 * told to execute. From that point on, the majority of communication
 * between the driver and the card goes via the firmware.
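 *
 * As a rough sketch only (the index and register names below are
 * illustrative and do not correspond to the actual register or variable
 * names used later in this file), the RX handshake described above
 * amounts to:
 *
 *	hw = read reg1;                index the firmware has filled up to
 *	while (cur != hw) {
 *		process buffer[cur];   hand the received frame to net80211
 *		buffer[cur] = new mbuf;  restock the slot just consumed
 *		cur = (cur + 1) % 32;  the RX ring holds 32 buffers
 *	}
 *	write cur to reg2;             tell the firmware which buffers it
 *	                               may now reuse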
 */

#include "opt_wlan.h"
#include "opt_wpi.h"

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
#include <dev/wpi/if_wpi_debug.h>

struct wpi_ident {
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subdevice;
	const char	*name;
};

static const struct wpi_ident wpi_ident_table[] = {
	/* The below entries support ABG regardless of the subid */
	{ 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" },
	{ 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" },
	/* The below entries only support BG */
	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
	{ 0, 0, 0, NULL }
};

static int	wpi_probe(device_t);
static int	wpi_attach(device_t);
static void	wpi_radiotap_attach(struct wpi_softc *);
static void	wpi_sysctlattach(struct wpi_softc *);
static void	wpi_init_beacon(struct wpi_vap *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	wpi_vap_delete(struct ieee80211vap *);
static int	wpi_detach(device_t);
static int	wpi_shutdown(device_t);
static int	wpi_suspend(device_t);
static int	wpi_resume(device_t);
static int	wpi_nic_lock(struct wpi_softc *);
static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	wpi_dma_contig_free(struct wpi_dma_info *);
static int	wpi_alloc_shared(struct wpi_softc *);
static void	wpi_free_shared(struct wpi_softc *);
static int	wpi_alloc_fwmem(struct wpi_softc *);
static void	wpi_free_fwmem(struct wpi_softc *);
static int	wpi_alloc_rx_ring(struct wpi_softc *);
static void	wpi_update_rx_ring(struct wpi_softc *);
static void	wpi_reset_rx_ring(struct wpi_softc *);
static void	wpi_free_rx_ring(struct wpi_softc *);
static int	wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
		    int);
static void
wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 160 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 162 static int wpi_read_eeprom(struct wpi_softc *, 163 uint8_t macaddr[IEEE80211_ADDR_LEN]); 164 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 165 static void wpi_read_eeprom_band(struct wpi_softc *, int); 166 static int wpi_read_eeprom_channels(struct wpi_softc *, int); 167 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 168 struct ieee80211_channel *); 169 static int wpi_setregdomain(struct ieee80211com *, 170 struct ieee80211_regdomain *, int, 171 struct ieee80211_channel[]); 172 static int wpi_read_eeprom_group(struct wpi_softc *, int); 173 static int wpi_add_node_entry_adhoc(struct wpi_softc *); 174 static void wpi_node_free(struct ieee80211_node *); 175 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 176 const uint8_t mac[IEEE80211_ADDR_LEN]); 177 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 178 static void wpi_calib_timeout(void *); 179 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 180 struct wpi_rx_data *); 181 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 182 struct wpi_rx_data *); 183 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 184 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 185 static void wpi_notif_intr(struct wpi_softc *); 186 static void wpi_wakeup_intr(struct wpi_softc *); 187 #ifdef WPI_DEBUG 188 static void wpi_debug_registers(struct wpi_softc *); 189 #endif 190 static void wpi_fatal_intr(struct wpi_softc *); 191 static void wpi_intr(void *); 192 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 193 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 194 struct ieee80211_node *); 195 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 196 struct ieee80211_node *, 197 const struct ieee80211_bpf_params *); 198 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 199 const struct ieee80211_bpf_params *); 200 static void wpi_start(struct ifnet *); 201 static void wpi_start_task(void *, int); 202 static void wpi_watchdog_rfkill(void *); 203 static void wpi_scan_timeout(void *); 204 static void wpi_tx_timeout(void *); 205 static int wpi_ioctl(struct ifnet *, u_long, caddr_t); 206 static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 207 static int wpi_mrr_setup(struct wpi_softc *); 208 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 209 static int wpi_add_broadcast_node(struct wpi_softc *, int); 210 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 211 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 212 static int wpi_updateedca(struct ieee80211com *); 213 static void wpi_set_promisc(struct wpi_softc *); 214 static void wpi_update_promisc(struct ifnet *); 215 static void wpi_update_mcast(struct ifnet *); 216 static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 217 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 218 static void wpi_power_calibration(struct wpi_softc *); 219 static int wpi_set_txpower(struct wpi_softc *, int); 220 static int wpi_get_power_index(struct wpi_softc *, 221 struct wpi_power_group *, struct ieee80211_channel *, int); 222 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 223 
static int wpi_send_btcoex(struct wpi_softc *); 224 static int wpi_send_rxon(struct wpi_softc *, int, int); 225 static int wpi_config(struct wpi_softc *); 226 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 227 struct ieee80211_channel *, uint8_t); 228 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 229 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 230 struct ieee80211_channel *); 231 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 232 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 233 static int wpi_config_beacon(struct wpi_vap *); 234 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 235 static void wpi_update_beacon(struct ieee80211vap *, int); 236 static void wpi_newassoc(struct ieee80211_node *, int); 237 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 238 static int wpi_load_key(struct ieee80211_node *, 239 const struct ieee80211_key *); 240 static void wpi_load_key_cb(void *, struct ieee80211_node *); 241 static int wpi_set_global_keys(struct ieee80211_node *); 242 static int wpi_del_key(struct ieee80211_node *, 243 const struct ieee80211_key *); 244 static void wpi_del_key_cb(void *, struct ieee80211_node *); 245 static int wpi_process_key(struct ieee80211vap *, 246 const struct ieee80211_key *, int); 247 static int wpi_key_set(struct ieee80211vap *, 248 const struct ieee80211_key *, 249 const uint8_t mac[IEEE80211_ADDR_LEN]); 250 static int wpi_key_delete(struct ieee80211vap *, 251 const struct ieee80211_key *); 252 static int wpi_post_alive(struct wpi_softc *); 253 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 254 static int wpi_load_firmware(struct wpi_softc *); 255 static int wpi_read_firmware(struct wpi_softc *); 256 static void wpi_unload_firmware(struct wpi_softc *); 257 static int wpi_clock_wait(struct wpi_softc *); 258 static int wpi_apm_init(struct wpi_softc *); 259 static void wpi_apm_stop_master(struct wpi_softc *); 260 static void wpi_apm_stop(struct wpi_softc *); 261 static void wpi_nic_config(struct wpi_softc *); 262 static int wpi_hw_init(struct wpi_softc *); 263 static void wpi_hw_stop(struct wpi_softc *); 264 static void wpi_radio_on(void *, int); 265 static void wpi_radio_off(void *, int); 266 static void wpi_init(void *); 267 static void wpi_stop_locked(struct wpi_softc *); 268 static void wpi_stop(struct wpi_softc *); 269 static void wpi_scan_start(struct ieee80211com *); 270 static void wpi_scan_end(struct ieee80211com *); 271 static void wpi_set_channel(struct ieee80211com *); 272 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 273 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 274 static void wpi_hw_reset(void *, int); 275 276 static device_method_t wpi_methods[] = { 277 /* Device interface */ 278 DEVMETHOD(device_probe, wpi_probe), 279 DEVMETHOD(device_attach, wpi_attach), 280 DEVMETHOD(device_detach, wpi_detach), 281 DEVMETHOD(device_shutdown, wpi_shutdown), 282 DEVMETHOD(device_suspend, wpi_suspend), 283 DEVMETHOD(device_resume, wpi_resume), 284 285 DEVMETHOD_END 286 }; 287 288 static driver_t wpi_driver = { 289 "wpi", 290 wpi_methods, 291 sizeof (struct wpi_softc) 292 }; 293 static devclass_t wpi_devclass; 294 295 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 296 297 MODULE_VERSION(wpi, 1); 298 299 MODULE_DEPEND(wpi, pci, 1, 1, 1); 300 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 301 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 302 303 static int 304 
wpi_probe(device_t dev) 305 { 306 const struct wpi_ident *ident; 307 308 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 309 if (pci_get_vendor(dev) == ident->vendor && 310 pci_get_device(dev) == ident->device) { 311 device_set_desc(dev, ident->name); 312 return (BUS_PROBE_DEFAULT); 313 } 314 } 315 return ENXIO; 316 } 317 318 static int 319 wpi_attach(device_t dev) 320 { 321 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 322 struct ieee80211com *ic; 323 struct ifnet *ifp; 324 int i, error, rid; 325 #ifdef WPI_DEBUG 326 int supportsa = 1; 327 const struct wpi_ident *ident; 328 #endif 329 uint8_t macaddr[IEEE80211_ADDR_LEN]; 330 331 sc->sc_dev = dev; 332 333 #ifdef WPI_DEBUG 334 error = resource_int_value(device_get_name(sc->sc_dev), 335 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 336 if (error != 0) 337 sc->sc_debug = 0; 338 #else 339 sc->sc_debug = 0; 340 #endif 341 342 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 343 344 /* 345 * Get the offset of the PCI Express Capability Structure in PCI 346 * Configuration Space. 347 */ 348 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 349 if (error != 0) { 350 device_printf(dev, "PCIe capability structure not found!\n"); 351 return error; 352 } 353 354 /* 355 * Some card's only support 802.11b/g not a, check to see if 356 * this is one such card. A 0x0 in the subdevice table indicates 357 * the entire subdevice range is to be ignored. 358 */ 359 #ifdef WPI_DEBUG 360 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 361 if (ident->subdevice && 362 pci_get_subdevice(dev) == ident->subdevice) { 363 supportsa = 0; 364 break; 365 } 366 } 367 #endif 368 369 /* Clear device-specific "PCI retry timeout" register (41h). */ 370 pci_write_config(dev, 0x41, 0, 1); 371 372 /* Enable bus-mastering. */ 373 pci_enable_busmaster(dev); 374 375 rid = PCIR_BAR(0); 376 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 377 RF_ACTIVE); 378 if (sc->mem == NULL) { 379 device_printf(dev, "can't map mem space\n"); 380 return ENOMEM; 381 } 382 sc->sc_st = rman_get_bustag(sc->mem); 383 sc->sc_sh = rman_get_bushandle(sc->mem); 384 385 i = 1; 386 rid = 0; 387 if (pci_alloc_msi(dev, &i) == 0) 388 rid = 1; 389 /* Install interrupt handler. */ 390 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 391 (rid != 0 ? 0 : RF_SHAREABLE)); 392 if (sc->irq == NULL) { 393 device_printf(dev, "can't map interrupt\n"); 394 error = ENOMEM; 395 goto fail; 396 } 397 398 WPI_LOCK_INIT(sc); 399 WPI_TX_LOCK_INIT(sc); 400 WPI_RXON_LOCK_INIT(sc); 401 WPI_NT_LOCK_INIT(sc); 402 WPI_TXQ_LOCK_INIT(sc); 403 WPI_TXQ_STATE_LOCK_INIT(sc); 404 405 /* Allocate DMA memory for firmware transfers. */ 406 if ((error = wpi_alloc_fwmem(sc)) != 0) { 407 device_printf(dev, 408 "could not allocate memory for firmware, error %d\n", 409 error); 410 goto fail; 411 } 412 413 /* Allocate shared page. */ 414 if ((error = wpi_alloc_shared(sc)) != 0) { 415 device_printf(dev, "could not allocate shared page\n"); 416 goto fail; 417 } 418 419 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 420 for (i = 0; i < WPI_NTXQUEUES; i++) { 421 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 422 device_printf(dev, 423 "could not allocate TX ring %d, error %d\n", i, 424 error); 425 goto fail; 426 } 427 } 428 429 /* Allocate RX ring. 
*/ 430 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 431 device_printf(dev, "could not allocate RX ring, error %d\n", 432 error); 433 goto fail; 434 } 435 436 /* Clear pending interrupts. */ 437 WPI_WRITE(sc, WPI_INT, 0xffffffff); 438 439 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 440 if (ifp == NULL) { 441 device_printf(dev, "can not allocate ifnet structure\n"); 442 goto fail; 443 } 444 445 ic = ifp->if_l2com; 446 ic->ic_ifp = ifp; 447 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 448 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 449 450 /* Set device capabilities. */ 451 ic->ic_caps = 452 IEEE80211_C_STA /* station mode supported */ 453 | IEEE80211_C_IBSS /* IBSS mode supported */ 454 | IEEE80211_C_HOSTAP /* Host access point mode */ 455 | IEEE80211_C_MONITOR /* monitor mode supported */ 456 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 457 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 458 | IEEE80211_C_TXPMGT /* tx power management */ 459 | IEEE80211_C_SHSLOT /* short slot time supported */ 460 | IEEE80211_C_WPA /* 802.11i */ 461 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 462 | IEEE80211_C_WME /* 802.11e */ 463 | IEEE80211_C_PMGT /* Station-side power mgmt */ 464 ; 465 466 ic->ic_cryptocaps = 467 IEEE80211_CRYPTO_AES_CCM; 468 469 /* 470 * Read in the eeprom and also setup the channels for 471 * net80211. We don't set the rates as net80211 does this for us 472 */ 473 if ((error = wpi_read_eeprom(sc, macaddr)) != 0) { 474 device_printf(dev, "could not read EEPROM, error %d\n", 475 error); 476 goto fail; 477 } 478 479 #ifdef WPI_DEBUG 480 if (bootverbose) { 481 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 482 sc->domain); 483 device_printf(sc->sc_dev, "Hardware Type: %c\n", 484 sc->type > 1 ? 'B': '?'); 485 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 486 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 487 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 488 supportsa ? "does" : "does not"); 489 490 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 491 check what sc->rev really represents - benjsc 20070615 */ 492 } 493 #endif 494 495 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 496 ifp->if_softc = sc; 497 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 498 ifp->if_init = wpi_init; 499 ifp->if_ioctl = wpi_ioctl; 500 ifp->if_start = wpi_start; 501 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 502 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 503 IFQ_SET_READY(&ifp->if_snd); 504 505 ieee80211_ifattach(ic, macaddr); 506 ic->ic_vap_create = wpi_vap_create; 507 ic->ic_vap_delete = wpi_vap_delete; 508 ic->ic_raw_xmit = wpi_raw_xmit; 509 ic->ic_node_alloc = wpi_node_alloc; 510 sc->sc_node_free = ic->ic_node_free; 511 ic->ic_node_free = wpi_node_free; 512 ic->ic_wme.wme_update = wpi_updateedca; 513 ic->ic_update_promisc = wpi_update_promisc; 514 ic->ic_update_mcast = wpi_update_mcast; 515 ic->ic_newassoc = wpi_newassoc; 516 ic->ic_scan_start = wpi_scan_start; 517 ic->ic_scan_end = wpi_scan_end; 518 ic->ic_set_channel = wpi_set_channel; 519 sc->sc_scan_curchan = ic->ic_scan_curchan; 520 ic->ic_scan_curchan = wpi_scan_curchan; 521 ic->ic_scan_mindwell = wpi_scan_mindwell; 522 ic->ic_setregdomain = wpi_setregdomain; 523 524 wpi_radiotap_attach(sc); 525 526 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 527 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 528 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 529 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 530 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 531 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 532 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 533 TASK_INIT(&sc->sc_start_task, 0, wpi_start_task, sc); 534 535 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 536 taskqueue_thread_enqueue, &sc->sc_tq); 537 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 538 if (error != 0) { 539 device_printf(dev, "can't start threads, error %d\n", error); 540 goto fail; 541 } 542 543 wpi_sysctlattach(sc); 544 545 /* 546 * Hook our interrupt after all initialization is complete. 547 */ 548 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 549 NULL, wpi_intr, sc, &sc->sc_ih); 550 if (error != 0) { 551 device_printf(dev, "can't establish interrupt, error %d\n", 552 error); 553 goto fail; 554 } 555 556 if (bootverbose) 557 ieee80211_announce(ic); 558 559 #ifdef WPI_DEBUG 560 if (sc->sc_debug & WPI_DEBUG_HW) 561 ieee80211_announce_channels(ic); 562 #endif 563 564 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 565 return 0; 566 567 fail: wpi_detach(dev); 568 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 569 return error; 570 } 571 572 /* 573 * Attach the interface to 802.11 radiotap. 
574 */ 575 static void 576 wpi_radiotap_attach(struct wpi_softc *sc) 577 { 578 struct ifnet *ifp = sc->sc_ifp; 579 struct ieee80211com *ic = ifp->if_l2com; 580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 581 ieee80211_radiotap_attach(ic, 582 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 583 WPI_TX_RADIOTAP_PRESENT, 584 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 585 WPI_RX_RADIOTAP_PRESENT); 586 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 587 } 588 589 static void 590 wpi_sysctlattach(struct wpi_softc *sc) 591 { 592 #ifdef WPI_DEBUG 593 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 594 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 595 596 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 597 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 598 "control debugging printfs"); 599 #endif 600 } 601 602 static void 603 wpi_init_beacon(struct wpi_vap *wvp) 604 { 605 struct wpi_buf *bcn = &wvp->wv_bcbuf; 606 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 607 608 cmd->id = WPI_ID_BROADCAST; 609 cmd->ofdm_mask = 0xff; 610 cmd->cck_mask = 0x0f; 611 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 612 cmd->flags = htole32(WPI_TX_AUTO_SEQ | WPI_TX_INSERT_TSTAMP); 613 614 bcn->code = WPI_CMD_SET_BEACON; 615 bcn->ac = WPI_CMD_QUEUE_NUM; 616 bcn->size = sizeof(struct wpi_cmd_beacon); 617 } 618 619 static struct ieee80211vap * 620 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 621 enum ieee80211_opmode opmode, int flags, 622 const uint8_t bssid[IEEE80211_ADDR_LEN], 623 const uint8_t mac[IEEE80211_ADDR_LEN]) 624 { 625 struct wpi_vap *wvp; 626 struct ieee80211vap *vap; 627 628 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 629 return NULL; 630 631 wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap), 632 M_80211_VAP, M_NOWAIT | M_ZERO); 633 if (wvp == NULL) 634 return NULL; 635 vap = &wvp->wv_vap; 636 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); 637 638 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 639 WPI_VAP_LOCK_INIT(wvp); 640 wpi_init_beacon(wvp); 641 } 642 643 /* Override with driver methods. */ 644 vap->iv_key_set = wpi_key_set; 645 vap->iv_key_delete = wpi_key_delete; 646 wvp->wv_newstate = vap->iv_newstate; 647 vap->iv_newstate = wpi_newstate; 648 vap->iv_update_beacon = wpi_update_beacon; 649 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 650 651 ieee80211_ratectl_init(vap); 652 /* Complete setup. 
*/ 653 ieee80211_vap_attach(vap, ieee80211_media_change, 654 ieee80211_media_status); 655 ic->ic_opmode = opmode; 656 return vap; 657 } 658 659 static void 660 wpi_vap_delete(struct ieee80211vap *vap) 661 { 662 struct wpi_vap *wvp = WPI_VAP(vap); 663 struct wpi_buf *bcn = &wvp->wv_bcbuf; 664 enum ieee80211_opmode opmode = vap->iv_opmode; 665 666 ieee80211_ratectl_deinit(vap); 667 ieee80211_vap_detach(vap); 668 669 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 670 if (bcn->m != NULL) 671 m_freem(bcn->m); 672 673 WPI_VAP_LOCK_DESTROY(wvp); 674 } 675 676 free(wvp, M_80211_VAP); 677 } 678 679 static int 680 wpi_detach(device_t dev) 681 { 682 struct wpi_softc *sc = device_get_softc(dev); 683 struct ifnet *ifp = sc->sc_ifp; 684 struct ieee80211com *ic; 685 int qid; 686 687 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 688 689 if (ifp != NULL) { 690 ic = ifp->if_l2com; 691 692 ieee80211_draintask(ic, &sc->sc_reinittask); 693 ieee80211_draintask(ic, &sc->sc_radiooff_task); 694 ieee80211_draintask(ic, &sc->sc_radioon_task); 695 ieee80211_draintask(ic, &sc->sc_start_task); 696 697 wpi_stop(sc); 698 699 taskqueue_drain_all(sc->sc_tq); 700 taskqueue_free(sc->sc_tq); 701 702 callout_drain(&sc->watchdog_rfkill); 703 callout_drain(&sc->tx_timeout); 704 callout_drain(&sc->scan_timeout); 705 callout_drain(&sc->calib_to); 706 ieee80211_ifdetach(ic); 707 } 708 709 /* Uninstall interrupt handler. */ 710 if (sc->irq != NULL) { 711 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 712 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 713 sc->irq); 714 pci_release_msi(dev); 715 } 716 717 if (sc->txq[0].data_dmat) { 718 /* Free DMA resources. */ 719 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 720 wpi_free_tx_ring(sc, &sc->txq[qid]); 721 722 wpi_free_rx_ring(sc); 723 wpi_free_shared(sc); 724 } 725 726 if (sc->fw_dma.tag) 727 wpi_free_fwmem(sc); 728 729 if (sc->mem != NULL) 730 bus_release_resource(dev, SYS_RES_MEMORY, 731 rman_get_rid(sc->mem), sc->mem); 732 733 if (ifp != NULL) 734 if_free(ifp); 735 736 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 737 WPI_TXQ_STATE_LOCK_DESTROY(sc); 738 WPI_TXQ_LOCK_DESTROY(sc); 739 WPI_NT_LOCK_DESTROY(sc); 740 WPI_RXON_LOCK_DESTROY(sc); 741 WPI_TX_LOCK_DESTROY(sc); 742 WPI_LOCK_DESTROY(sc); 743 return 0; 744 } 745 746 static int 747 wpi_shutdown(device_t dev) 748 { 749 struct wpi_softc *sc = device_get_softc(dev); 750 751 wpi_stop(sc); 752 return 0; 753 } 754 755 static int 756 wpi_suspend(device_t dev) 757 { 758 struct wpi_softc *sc = device_get_softc(dev); 759 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 760 761 ieee80211_suspend_all(ic); 762 return 0; 763 } 764 765 static int 766 wpi_resume(device_t dev) 767 { 768 struct wpi_softc *sc = device_get_softc(dev); 769 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 770 771 /* Clear device-specific "PCI retry timeout" register (41h). */ 772 pci_write_config(dev, 0x41, 0, 1); 773 774 ieee80211_resume_all(ic); 775 return 0; 776 } 777 778 /* 779 * Grab exclusive access to NIC memory. 780 */ 781 static int 782 wpi_nic_lock(struct wpi_softc *sc) 783 { 784 int ntries; 785 786 /* Request exclusive access to NIC. */ 787 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 788 789 /* Spin until we actually get the lock. 
*/ 790 for (ntries = 0; ntries < 1000; ntries++) { 791 if ((WPI_READ(sc, WPI_GP_CNTRL) & 792 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 793 WPI_GP_CNTRL_MAC_ACCESS_ENA) 794 return 0; 795 DELAY(10); 796 } 797 798 device_printf(sc->sc_dev, "could not lock memory\n"); 799 800 return ETIMEDOUT; 801 } 802 803 /* 804 * Release lock on NIC memory. 805 */ 806 static __inline void 807 wpi_nic_unlock(struct wpi_softc *sc) 808 { 809 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 810 } 811 812 static __inline uint32_t 813 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 814 { 815 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 816 WPI_BARRIER_READ_WRITE(sc); 817 return WPI_READ(sc, WPI_PRPH_RDATA); 818 } 819 820 static __inline void 821 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 822 { 823 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 824 WPI_BARRIER_WRITE(sc); 825 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 826 } 827 828 static __inline void 829 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 830 { 831 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 832 } 833 834 static __inline void 835 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 836 { 837 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 838 } 839 840 static __inline void 841 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 842 const uint32_t *data, int count) 843 { 844 for (; count > 0; count--, data++, addr += 4) 845 wpi_prph_write(sc, addr, *data); 846 } 847 848 static __inline uint32_t 849 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 850 { 851 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 852 WPI_BARRIER_READ_WRITE(sc); 853 return WPI_READ(sc, WPI_MEM_RDATA); 854 } 855 856 static __inline void 857 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 858 int count) 859 { 860 for (; count > 0; count--, addr += 4) 861 *data++ = wpi_mem_read(sc, addr); 862 } 863 864 static int 865 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 866 { 867 uint8_t *out = data; 868 uint32_t val; 869 int error, ntries; 870 871 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 872 873 if ((error = wpi_nic_lock(sc)) != 0) 874 return error; 875 876 for (; count > 0; count -= 2, addr++) { 877 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 878 for (ntries = 0; ntries < 10; ntries++) { 879 val = WPI_READ(sc, WPI_EEPROM); 880 if (val & WPI_EEPROM_READ_VALID) 881 break; 882 DELAY(5); 883 } 884 if (ntries == 10) { 885 device_printf(sc->sc_dev, 886 "timeout reading ROM at 0x%x\n", addr); 887 return ETIMEDOUT; 888 } 889 *out++= val >> 16; 890 if (count > 1) 891 *out ++= val >> 24; 892 } 893 894 wpi_nic_unlock(sc); 895 896 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 897 898 return 0; 899 } 900 901 static void 902 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 903 { 904 if (error != 0) 905 return; 906 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 907 *(bus_addr_t *)arg = segs[0].ds_addr; 908 } 909 910 /* 911 * Allocates a contiguous block of dma memory of the requested size and 912 * alignment. 
913 */ 914 static int 915 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 916 void **kvap, bus_size_t size, bus_size_t alignment) 917 { 918 int error; 919 920 dma->tag = NULL; 921 dma->size = size; 922 923 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 924 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 925 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 926 if (error != 0) 927 goto fail; 928 929 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 930 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 931 if (error != 0) 932 goto fail; 933 934 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 935 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 936 if (error != 0) 937 goto fail; 938 939 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 940 941 if (kvap != NULL) 942 *kvap = dma->vaddr; 943 944 return 0; 945 946 fail: wpi_dma_contig_free(dma); 947 return error; 948 } 949 950 static void 951 wpi_dma_contig_free(struct wpi_dma_info *dma) 952 { 953 if (dma->vaddr != NULL) { 954 bus_dmamap_sync(dma->tag, dma->map, 955 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 956 bus_dmamap_unload(dma->tag, dma->map); 957 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 958 dma->vaddr = NULL; 959 } 960 if (dma->tag != NULL) { 961 bus_dma_tag_destroy(dma->tag); 962 dma->tag = NULL; 963 } 964 } 965 966 /* 967 * Allocate a shared page between host and NIC. 968 */ 969 static int 970 wpi_alloc_shared(struct wpi_softc *sc) 971 { 972 /* Shared buffer must be aligned on a 4KB boundary. */ 973 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 974 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 975 } 976 977 static void 978 wpi_free_shared(struct wpi_softc *sc) 979 { 980 wpi_dma_contig_free(&sc->shared_dma); 981 } 982 983 /* 984 * Allocate DMA-safe memory for firmware transfer. 985 */ 986 static int 987 wpi_alloc_fwmem(struct wpi_softc *sc) 988 { 989 /* Must be aligned on a 16-byte boundary. */ 990 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 991 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 992 } 993 994 static void 995 wpi_free_fwmem(struct wpi_softc *sc) 996 { 997 wpi_dma_contig_free(&sc->fw_dma); 998 } 999 1000 static int 1001 wpi_alloc_rx_ring(struct wpi_softc *sc) 1002 { 1003 struct wpi_rx_ring *ring = &sc->rxq; 1004 bus_size_t size; 1005 int i, error; 1006 1007 ring->cur = 0; 1008 ring->update = 0; 1009 1010 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1011 1012 /* Allocate RX descriptors (16KB aligned.) */ 1013 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1014 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1015 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1016 if (error != 0) { 1017 device_printf(sc->sc_dev, 1018 "%s: could not allocate RX ring DMA memory, error %d\n", 1019 __func__, error); 1020 goto fail; 1021 } 1022 1023 /* Create RX buffer DMA tag. */ 1024 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1025 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1026 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1027 &ring->data_dmat); 1028 if (error != 0) { 1029 device_printf(sc->sc_dev, 1030 "%s: could not create RX buf DMA tag, error %d\n", 1031 __func__, error); 1032 goto fail; 1033 } 1034 1035 /* 1036 * Allocate and map RX buffers. 
1037 */ 1038 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1039 struct wpi_rx_data *data = &ring->data[i]; 1040 bus_addr_t paddr; 1041 1042 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1043 if (error != 0) { 1044 device_printf(sc->sc_dev, 1045 "%s: could not create RX buf DMA map, error %d\n", 1046 __func__, error); 1047 goto fail; 1048 } 1049 1050 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1051 if (data->m == NULL) { 1052 device_printf(sc->sc_dev, 1053 "%s: could not allocate RX mbuf\n", __func__); 1054 error = ENOBUFS; 1055 goto fail; 1056 } 1057 1058 error = bus_dmamap_load(ring->data_dmat, data->map, 1059 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1060 &paddr, BUS_DMA_NOWAIT); 1061 if (error != 0 && error != EFBIG) { 1062 device_printf(sc->sc_dev, 1063 "%s: can't map mbuf (error %d)\n", __func__, 1064 error); 1065 goto fail; 1066 } 1067 1068 /* Set physical address of RX buffer. */ 1069 ring->desc[i] = htole32(paddr); 1070 } 1071 1072 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1073 BUS_DMASYNC_PREWRITE); 1074 1075 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1076 1077 return 0; 1078 1079 fail: wpi_free_rx_ring(sc); 1080 1081 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1082 1083 return error; 1084 } 1085 1086 static void 1087 wpi_update_rx_ring(struct wpi_softc *sc) 1088 { 1089 struct wpi_rx_ring *ring = &sc->rxq; 1090 1091 if (ring->update != 0) { 1092 /* Wait for INT_WAKEUP event. */ 1093 return; 1094 } 1095 1096 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) { 1097 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1098 __func__); 1099 1100 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1101 ring->update = 1; 1102 } else 1103 WPI_WRITE(sc, WPI_FH_RX_WPTR, ring->cur & ~7); 1104 } 1105 1106 static void 1107 wpi_reset_rx_ring(struct wpi_softc *sc) 1108 { 1109 struct wpi_rx_ring *ring = &sc->rxq; 1110 int ntries; 1111 1112 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1113 1114 if (wpi_nic_lock(sc) == 0) { 1115 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1116 for (ntries = 0; ntries < 1000; ntries++) { 1117 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1118 WPI_FH_RX_STATUS_IDLE) 1119 break; 1120 DELAY(10); 1121 } 1122 wpi_nic_unlock(sc); 1123 } 1124 1125 ring->cur = 0; 1126 ring->update = 0; 1127 } 1128 1129 static void 1130 wpi_free_rx_ring(struct wpi_softc *sc) 1131 { 1132 struct wpi_rx_ring *ring = &sc->rxq; 1133 int i; 1134 1135 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1136 1137 wpi_dma_contig_free(&ring->desc_dma); 1138 1139 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1140 struct wpi_rx_data *data = &ring->data[i]; 1141 1142 if (data->m != NULL) { 1143 bus_dmamap_sync(ring->data_dmat, data->map, 1144 BUS_DMASYNC_POSTREAD); 1145 bus_dmamap_unload(ring->data_dmat, data->map); 1146 m_freem(data->m); 1147 data->m = NULL; 1148 } 1149 if (data->map != NULL) 1150 bus_dmamap_destroy(ring->data_dmat, data->map); 1151 } 1152 if (ring->data_dmat != NULL) { 1153 bus_dma_tag_destroy(ring->data_dmat); 1154 ring->data_dmat = NULL; 1155 } 1156 } 1157 1158 static int 1159 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1160 { 1161 bus_addr_t paddr; 1162 bus_size_t size; 1163 int i, error; 1164 1165 ring->qid = qid; 1166 ring->queued = 0; 1167 ring->cur = 0; 1168 ring->update = 0; 1169 1170 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1171 1172 /* Allocate TX descriptors (16KB aligned.) 
*/ 1173 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1174 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1175 size, WPI_RING_DMA_ALIGN); 1176 if (error != 0) { 1177 device_printf(sc->sc_dev, 1178 "%s: could not allocate TX ring DMA memory, error %d\n", 1179 __func__, error); 1180 goto fail; 1181 } 1182 1183 /* Update shared area with ring physical address. */ 1184 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1185 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1186 BUS_DMASYNC_PREWRITE); 1187 1188 /* 1189 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1190 * to allocate commands space for other rings. 1191 * XXX Do we really need to allocate descriptors for other rings? 1192 */ 1193 if (qid > WPI_CMD_QUEUE_NUM) { 1194 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1195 return 0; 1196 } 1197 1198 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1199 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1200 size, 4); 1201 if (error != 0) { 1202 device_printf(sc->sc_dev, 1203 "%s: could not allocate TX cmd DMA memory, error %d\n", 1204 __func__, error); 1205 goto fail; 1206 } 1207 1208 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1209 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1210 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1211 &ring->data_dmat); 1212 if (error != 0) { 1213 device_printf(sc->sc_dev, 1214 "%s: could not create TX buf DMA tag, error %d\n", 1215 __func__, error); 1216 goto fail; 1217 } 1218 1219 paddr = ring->cmd_dma.paddr; 1220 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1221 struct wpi_tx_data *data = &ring->data[i]; 1222 1223 data->cmd_paddr = paddr; 1224 paddr += sizeof (struct wpi_tx_cmd); 1225 1226 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1227 if (error != 0) { 1228 device_printf(sc->sc_dev, 1229 "%s: could not create TX buf DMA map, error %d\n", 1230 __func__, error); 1231 goto fail; 1232 } 1233 } 1234 1235 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1236 1237 return 0; 1238 1239 fail: wpi_free_tx_ring(sc, ring); 1240 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1241 return error; 1242 } 1243 1244 static void 1245 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1246 { 1247 if (ring->update != 0) { 1248 /* Wait for INT_WAKEUP event. */ 1249 return; 1250 } 1251 1252 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) { 1253 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1254 __func__, ring->qid); 1255 1256 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1257 ring->update = 1; 1258 } else 1259 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1260 } 1261 1262 static void 1263 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1264 { 1265 int i; 1266 1267 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1268 1269 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1270 struct wpi_tx_data *data = &ring->data[i]; 1271 1272 if (data->m != NULL) { 1273 bus_dmamap_sync(ring->data_dmat, data->map, 1274 BUS_DMASYNC_POSTWRITE); 1275 bus_dmamap_unload(ring->data_dmat, data->map); 1276 m_freem(data->m); 1277 data->m = NULL; 1278 } 1279 } 1280 /* Clear TX descriptors. 
*/ 1281 memset(ring->desc, 0, ring->desc_dma.size); 1282 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1283 BUS_DMASYNC_PREWRITE); 1284 sc->qfullmsk &= ~(1 << ring->qid); 1285 ring->queued = 0; 1286 ring->cur = 0; 1287 ring->update = 0; 1288 } 1289 1290 static void 1291 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1292 { 1293 int i; 1294 1295 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1296 1297 wpi_dma_contig_free(&ring->desc_dma); 1298 wpi_dma_contig_free(&ring->cmd_dma); 1299 1300 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1301 struct wpi_tx_data *data = &ring->data[i]; 1302 1303 if (data->m != NULL) { 1304 bus_dmamap_sync(ring->data_dmat, data->map, 1305 BUS_DMASYNC_POSTWRITE); 1306 bus_dmamap_unload(ring->data_dmat, data->map); 1307 m_freem(data->m); 1308 } 1309 if (data->map != NULL) 1310 bus_dmamap_destroy(ring->data_dmat, data->map); 1311 } 1312 if (ring->data_dmat != NULL) { 1313 bus_dma_tag_destroy(ring->data_dmat); 1314 ring->data_dmat = NULL; 1315 } 1316 } 1317 1318 /* 1319 * Extract various information from EEPROM. 1320 */ 1321 static int 1322 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1323 { 1324 #define WPI_CHK(res) do { \ 1325 if ((error = res) != 0) \ 1326 goto fail; \ 1327 } while (0) 1328 int error, i; 1329 1330 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1331 1332 /* Adapter has to be powered on for EEPROM access to work. */ 1333 if ((error = wpi_apm_init(sc)) != 0) { 1334 device_printf(sc->sc_dev, 1335 "%s: could not power ON adapter, error %d\n", __func__, 1336 error); 1337 return error; 1338 } 1339 1340 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1341 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1342 error = EIO; 1343 goto fail; 1344 } 1345 /* Clear HW ownership of EEPROM. */ 1346 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1347 1348 /* Read the hardware capabilities, revision and SKU type. */ 1349 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1350 sizeof(sc->cap))); 1351 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1352 sizeof(sc->rev))); 1353 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1354 sizeof(sc->type))); 1355 1356 sc->rev = le16toh(sc->rev); 1357 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1358 sc->rev, sc->type); 1359 1360 /* Read the regulatory domain (4 ASCII characters.) */ 1361 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1362 sizeof(sc->domain))); 1363 1364 /* Read MAC address. */ 1365 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1366 IEEE80211_ADDR_LEN)); 1367 1368 /* Read the list of authorized channels. */ 1369 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1370 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1371 1372 /* Read the list of TX power groups. */ 1373 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1374 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1375 1376 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1377 1378 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1379 __func__); 1380 1381 return error; 1382 #undef WPI_CHK 1383 } 1384 1385 /* 1386 * Translate EEPROM flags to net80211. 
1387 */ 1388 static uint32_t 1389 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1390 { 1391 uint32_t nflags; 1392 1393 nflags = 0; 1394 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1395 nflags |= IEEE80211_CHAN_PASSIVE; 1396 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1397 nflags |= IEEE80211_CHAN_NOADHOC; 1398 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1399 nflags |= IEEE80211_CHAN_DFS; 1400 /* XXX apparently IBSS may still be marked */ 1401 nflags |= IEEE80211_CHAN_NOADHOC; 1402 } 1403 1404 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1405 if (nflags & IEEE80211_CHAN_NOADHOC) 1406 nflags |= IEEE80211_CHAN_NOHOSTAP; 1407 1408 return nflags; 1409 } 1410 1411 static void 1412 wpi_read_eeprom_band(struct wpi_softc *sc, int n) 1413 { 1414 struct ifnet *ifp = sc->sc_ifp; 1415 struct ieee80211com *ic = ifp->if_l2com; 1416 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1417 const struct wpi_chan_band *band = &wpi_bands[n]; 1418 struct ieee80211_channel *c; 1419 uint8_t chan; 1420 int i, nflags; 1421 1422 for (i = 0; i < band->nchan; i++) { 1423 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1424 DPRINTF(sc, WPI_DEBUG_EEPROM, 1425 "Channel Not Valid: %d, band %d\n", 1426 band->chan[i],n); 1427 continue; 1428 } 1429 1430 chan = band->chan[i]; 1431 nflags = wpi_eeprom_channel_flags(&channels[i]); 1432 1433 c = &ic->ic_channels[ic->ic_nchans++]; 1434 c->ic_ieee = chan; 1435 c->ic_maxregpower = channels[i].maxpwr; 1436 c->ic_maxpower = 2*c->ic_maxregpower; 1437 1438 if (n == 0) { /* 2GHz band */ 1439 c->ic_freq = ieee80211_ieee2mhz(chan, 1440 IEEE80211_CHAN_G); 1441 1442 /* G =>'s B is supported */ 1443 c->ic_flags = IEEE80211_CHAN_B | nflags; 1444 c = &ic->ic_channels[ic->ic_nchans++]; 1445 c[0] = c[-1]; 1446 c->ic_flags = IEEE80211_CHAN_G | nflags; 1447 } else { /* 5GHz band */ 1448 c->ic_freq = ieee80211_ieee2mhz(chan, 1449 IEEE80211_CHAN_A); 1450 1451 c->ic_flags = IEEE80211_CHAN_A | nflags; 1452 } 1453 1454 /* Save maximum allowed TX power for this channel. */ 1455 sc->maxpwr[chan] = channels[i].maxpwr; 1456 1457 DPRINTF(sc, WPI_DEBUG_EEPROM, 1458 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1459 " offset %d\n", chan, c->ic_freq, 1460 channels[i].flags, sc->maxpwr[chan], 1461 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1462 } 1463 } 1464 1465 /** 1466 * Read the eeprom to find out what channels are valid for the given 1467 * band and update net80211 with what we find. 
1468 */ 1469 static int 1470 wpi_read_eeprom_channels(struct wpi_softc *sc, int n) 1471 { 1472 struct ifnet *ifp = sc->sc_ifp; 1473 struct ieee80211com *ic = ifp->if_l2com; 1474 const struct wpi_chan_band *band = &wpi_bands[n]; 1475 int error; 1476 1477 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1478 1479 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1480 band->nchan * sizeof (struct wpi_eeprom_chan)); 1481 if (error != 0) { 1482 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1483 return error; 1484 } 1485 1486 wpi_read_eeprom_band(sc, n); 1487 1488 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1489 1490 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1491 1492 return 0; 1493 } 1494 1495 static struct wpi_eeprom_chan * 1496 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1497 { 1498 int i, j; 1499 1500 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1501 for (i = 0; i < wpi_bands[j].nchan; i++) 1502 if (wpi_bands[j].chan[i] == c->ic_ieee) 1503 return &sc->eeprom_channels[j][i]; 1504 1505 return NULL; 1506 } 1507 1508 /* 1509 * Enforce flags read from EEPROM. 1510 */ 1511 static int 1512 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1513 int nchan, struct ieee80211_channel chans[]) 1514 { 1515 struct ifnet *ifp = ic->ic_ifp; 1516 struct wpi_softc *sc = ifp->if_softc; 1517 int i; 1518 1519 for (i = 0; i < nchan; i++) { 1520 struct ieee80211_channel *c = &chans[i]; 1521 struct wpi_eeprom_chan *channel; 1522 1523 channel = wpi_find_eeprom_channel(sc, c); 1524 if (channel == NULL) { 1525 if_printf(ic->ic_ifp, 1526 "%s: invalid channel %u freq %u/0x%x\n", 1527 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1528 return EINVAL; 1529 } 1530 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1531 } 1532 1533 return 0; 1534 } 1535 1536 static int 1537 wpi_read_eeprom_group(struct wpi_softc *sc, int n) 1538 { 1539 struct wpi_power_group *group = &sc->groups[n]; 1540 struct wpi_eeprom_group rgroup; 1541 int i, error; 1542 1543 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1544 1545 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1546 &rgroup, sizeof rgroup)) != 0) { 1547 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1548 return error; 1549 } 1550 1551 /* Save TX power group information. */ 1552 group->chan = rgroup.chan; 1553 group->maxpwr = rgroup.maxpwr; 1554 /* Retrieve temperature at which the samples were taken. 
*/ 1555 group->temp = (int16_t)le16toh(rgroup.temp); 1556 1557 DPRINTF(sc, WPI_DEBUG_EEPROM, 1558 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1559 group->maxpwr, group->temp); 1560 1561 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1562 group->samples[i].index = rgroup.samples[i].index; 1563 group->samples[i].power = rgroup.samples[i].power; 1564 1565 DPRINTF(sc, WPI_DEBUG_EEPROM, 1566 "\tsample %d: index=%d power=%d\n", i, 1567 group->samples[i].index, group->samples[i].power); 1568 } 1569 1570 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1571 1572 return 0; 1573 } 1574 1575 static int 1576 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1577 { 1578 int newid = WPI_ID_IBSS_MIN; 1579 1580 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1581 if ((sc->nodesmsk & (1 << newid)) == 0) { 1582 sc->nodesmsk |= 1 << newid; 1583 return newid; 1584 } 1585 } 1586 1587 return WPI_ID_UNDEFINED; 1588 } 1589 1590 static __inline int 1591 wpi_add_node_entry_sta(struct wpi_softc *sc) 1592 { 1593 sc->nodesmsk |= 1 << WPI_ID_BSS; 1594 1595 return WPI_ID_BSS; 1596 } 1597 1598 static __inline int 1599 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1600 { 1601 if (id == WPI_ID_UNDEFINED) 1602 return 0; 1603 1604 return (sc->nodesmsk >> id) & 1; 1605 } 1606 1607 static __inline void 1608 wpi_clear_node_table(struct wpi_softc *sc) 1609 { 1610 sc->nodesmsk = 0; 1611 } 1612 1613 static __inline void 1614 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1615 { 1616 sc->nodesmsk &= ~(1 << id); 1617 } 1618 1619 static struct ieee80211_node * 1620 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1621 { 1622 struct wpi_node *wn; 1623 1624 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1625 M_NOWAIT | M_ZERO); 1626 1627 if (wn == NULL) 1628 return NULL; 1629 1630 wn->id = WPI_ID_UNDEFINED; 1631 1632 return &wn->ni; 1633 } 1634 1635 static void 1636 wpi_node_free(struct ieee80211_node *ni) 1637 { 1638 struct ieee80211com *ic = ni->ni_ic; 1639 struct wpi_softc *sc = ic->ic_ifp->if_softc; 1640 struct wpi_node *wn = WPI_NODE(ni); 1641 1642 if (wn->id != WPI_ID_UNDEFINED) { 1643 WPI_NT_LOCK(sc); 1644 if (wpi_check_node_entry(sc, wn->id)) { 1645 wpi_del_node_entry(sc, wn->id); 1646 wpi_del_node(sc, ni); 1647 } 1648 WPI_NT_UNLOCK(sc); 1649 } 1650 1651 sc->sc_node_free(ni); 1652 } 1653 1654 /** 1655 * Called by net80211 when ever there is a change to 80211 state machine 1656 */ 1657 static int 1658 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1659 { 1660 struct wpi_vap *wvp = WPI_VAP(vap); 1661 struct ieee80211com *ic = vap->iv_ic; 1662 struct ifnet *ifp = ic->ic_ifp; 1663 struct wpi_softc *sc = ifp->if_softc; 1664 int error = 0; 1665 1666 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1667 1668 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1669 ieee80211_state_name[vap->iv_state], 1670 ieee80211_state_name[nstate]); 1671 1672 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { 1673 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1674 device_printf(sc->sc_dev, 1675 "%s: could not set power saving level\n", 1676 __func__); 1677 return error; 1678 } 1679 } 1680 1681 switch (nstate) { 1682 case IEEE80211_S_SCAN: 1683 WPI_RXON_LOCK(sc); 1684 if ((sc->rxon.filter & htole32(WPI_FILTER_BSS)) && 1685 vap->iv_opmode != IEEE80211_M_STA) { 1686 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1687 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1688 device_printf(sc->sc_dev, 1689 "%s: could not send RXON\n", 
__func__); 1690 } 1691 } 1692 WPI_RXON_UNLOCK(sc); 1693 break; 1694 1695 case IEEE80211_S_ASSOC: 1696 if (vap->iv_state != IEEE80211_S_RUN) 1697 break; 1698 /* FALLTHROUGH */ 1699 case IEEE80211_S_AUTH: 1700 /* 1701 * The node must be registered in the firmware before auth. 1702 * Also the associd must be cleared on RUN -> ASSOC 1703 * transitions. 1704 */ 1705 if ((error = wpi_auth(sc, vap)) != 0) { 1706 device_printf(sc->sc_dev, 1707 "%s: could not move to AUTH state, error %d\n", 1708 __func__, error); 1709 } 1710 break; 1711 1712 case IEEE80211_S_RUN: 1713 /* 1714 * RUN -> RUN transition; Just restart the timers. 1715 */ 1716 if (vap->iv_state == IEEE80211_S_RUN) { 1717 WPI_RXON_LOCK(sc); 1718 wpi_calib_timeout(sc); 1719 WPI_RXON_UNLOCK(sc); 1720 break; 1721 } 1722 1723 /* 1724 * !RUN -> RUN requires setting the association id 1725 * which is done with a firmware cmd. We also defer 1726 * starting the timers until that work is done. 1727 */ 1728 if ((error = wpi_run(sc, vap)) != 0) { 1729 device_printf(sc->sc_dev, 1730 "%s: could not move to RUN state\n", __func__); 1731 } 1732 break; 1733 1734 default: 1735 break; 1736 } 1737 if (error != 0) { 1738 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1739 return error; 1740 } 1741 1742 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1743 1744 return wvp->wv_newstate(vap, nstate, arg); 1745 } 1746 1747 static void 1748 wpi_calib_timeout(void *arg) 1749 { 1750 struct wpi_softc *sc = arg; 1751 1752 if (!(sc->rxon.filter & htole32(WPI_FILTER_BSS))) 1753 return; 1754 1755 wpi_power_calibration(sc); 1756 1757 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1758 } 1759 1760 static __inline uint8_t 1761 rate2plcp(const uint8_t rate) 1762 { 1763 switch (rate) { 1764 case 12: return 0xd; 1765 case 18: return 0xf; 1766 case 24: return 0x5; 1767 case 36: return 0x7; 1768 case 48: return 0x9; 1769 case 72: return 0xb; 1770 case 96: return 0x1; 1771 case 108: return 0x3; 1772 case 2: return 10; 1773 case 4: return 20; 1774 case 11: return 55; 1775 case 22: return 110; 1776 default: return 0; 1777 } 1778 } 1779 1780 static __inline uint8_t 1781 plcp2rate(const uint8_t plcp) 1782 { 1783 switch (plcp) { 1784 case 0xd: return 12; 1785 case 0xf: return 18; 1786 case 0x5: return 24; 1787 case 0x7: return 36; 1788 case 0x9: return 48; 1789 case 0xb: return 72; 1790 case 0x1: return 96; 1791 case 0x3: return 108; 1792 case 10: return 2; 1793 case 20: return 4; 1794 case 55: return 11; 1795 case 110: return 22; 1796 default: return 0; 1797 } 1798 } 1799 1800 /* Quickly determine if a given rate is CCK or OFDM. 
*/ 1801 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1802 1803 static void 1804 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1805 struct wpi_rx_data *data) 1806 { 1807 struct ifnet *ifp = sc->sc_ifp; 1808 struct ieee80211com *ic = ifp->if_l2com; 1809 struct wpi_rx_ring *ring = &sc->rxq; 1810 struct wpi_rx_stat *stat; 1811 struct wpi_rx_head *head; 1812 struct wpi_rx_tail *tail; 1813 struct ieee80211_frame *wh; 1814 struct ieee80211_node *ni; 1815 struct mbuf *m, *m1; 1816 bus_addr_t paddr; 1817 uint32_t flags; 1818 uint16_t len; 1819 int error; 1820 1821 stat = (struct wpi_rx_stat *)(desc + 1); 1822 1823 if (stat->len > WPI_STAT_MAXLEN) { 1824 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1825 goto fail1; 1826 } 1827 1828 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1829 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1830 len = le16toh(head->len); 1831 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1832 flags = le32toh(tail->flags); 1833 1834 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1835 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1836 le32toh(desc->len), len, (int8_t)stat->rssi, 1837 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1838 1839 /* Discard frames with a bad FCS early. */ 1840 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1841 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1842 __func__, flags); 1843 goto fail1; 1844 } 1845 /* Discard frames that are too short. */ 1846 if (len < sizeof (*wh)) { 1847 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1848 __func__, len); 1849 goto fail1; 1850 } 1851 1852 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1853 if (m1 == NULL) { 1854 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1855 __func__); 1856 goto fail1; 1857 } 1858 bus_dmamap_unload(ring->data_dmat, data->map); 1859 1860 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1861 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1862 if (error != 0 && error != EFBIG) { 1863 device_printf(sc->sc_dev, 1864 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1865 m_freem(m1); 1866 1867 /* Try to reload the old mbuf. */ 1868 error = bus_dmamap_load(ring->data_dmat, data->map, 1869 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1870 &paddr, BUS_DMA_NOWAIT); 1871 if (error != 0 && error != EFBIG) { 1872 panic("%s: could not load old RX mbuf", __func__); 1873 } 1874 /* Physical address may have changed. */ 1875 ring->desc[ring->cur] = htole32(paddr); 1876 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 1877 BUS_DMASYNC_PREWRITE); 1878 goto fail1; 1879 } 1880 1881 m = data->m; 1882 data->m = m1; 1883 /* Update RX descriptor. */ 1884 ring->desc[ring->cur] = htole32(paddr); 1885 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1886 BUS_DMASYNC_PREWRITE); 1887 1888 /* Finalize mbuf. */ 1889 m->m_pkthdr.rcvif = ifp; 1890 m->m_data = (caddr_t)(head + 1); 1891 m->m_pkthdr.len = m->m_len = len; 1892 1893 /* Grab a reference to the source node. */ 1894 wh = mtod(m, struct ieee80211_frame *); 1895 1896 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 1897 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 1898 /* Check whether decryption was successful or not. 
*/ 1899 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 1900 DPRINTF(sc, WPI_DEBUG_RECV, 1901 "CCMP decryption failed 0x%x\n", flags); 1902 goto fail2; 1903 } 1904 m->m_flags |= M_WEP; 1905 } 1906 1907 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 1908 1909 if (ieee80211_radiotap_active(ic)) { 1910 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 1911 1912 tap->wr_flags = 0; 1913 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 1914 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 1915 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 1916 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 1917 tap->wr_tsft = tail->tstamp; 1918 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 1919 tap->wr_rate = plcp2rate(head->plcp); 1920 } 1921 1922 WPI_UNLOCK(sc); 1923 1924 /* Send the frame to the 802.11 layer. */ 1925 if (ni != NULL) { 1926 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 1927 /* Node is no longer needed. */ 1928 ieee80211_free_node(ni); 1929 } else 1930 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 1931 1932 WPI_LOCK(sc); 1933 1934 return; 1935 1936 fail2: m_freem(m); 1937 1938 fail1: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1939 } 1940 1941 static void 1942 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1943 struct wpi_rx_data *data) 1944 { 1945 /* Ignore */ 1946 } 1947 1948 static void 1949 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 1950 { 1951 struct ifnet *ifp = sc->sc_ifp; 1952 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 1953 struct wpi_tx_data *data = &ring->data[desc->idx]; 1954 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 1955 struct mbuf *m; 1956 struct ieee80211_node *ni; 1957 struct ieee80211vap *vap; 1958 struct ieee80211com *ic; 1959 uint32_t status = le32toh(stat->status); 1960 int ackfailcnt = stat->ackfailcnt / 2; /* wpi_mrr_setup() */ 1961 1962 KASSERT(data->ni != NULL, ("no node")); 1963 KASSERT(data->m != NULL, ("no mbuf")); 1964 1965 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1966 1967 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 1968 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 1969 "status %x\n", __func__, desc->qid, desc->idx, ackfailcnt, 1970 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 1971 1972 /* Unmap and free mbuf. */ 1973 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 1974 bus_dmamap_unload(ring->data_dmat, data->map); 1975 m = data->m, data->m = NULL; 1976 ni = data->ni, data->ni = NULL; 1977 vap = ni->ni_vap; 1978 ic = vap->iv_ic; 1979 1980 /* 1981 * Update rate control statistics for the node. 
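 * The low byte of the TX status is 1 for a successful transmission;
 * anything else is reported to the rate control module as a failure,
 * together with the (halved) retry count from the firmware.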
1982 */ 1983 if ((status & 0xff) != 1) { 1984 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1985 ieee80211_ratectl_tx_complete(vap, ni, 1986 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 1987 } else { 1988 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1989 ieee80211_ratectl_tx_complete(vap, ni, 1990 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 1991 } 1992 1993 ieee80211_tx_complete(ni, m, (status & 0xff) != 1); 1994 1995 WPI_TXQ_STATE_LOCK(sc); 1996 ring->queued -= 1; 1997 if (ring->queued > 0) { 1998 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 1999 2000 if (sc->qfullmsk != 0 && 2001 ring->queued < WPI_TX_RING_LOMARK) { 2002 sc->qfullmsk &= ~(1 << ring->qid); 2003 IF_LOCK(&ifp->if_snd); 2004 if (sc->qfullmsk == 0 && 2005 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2006 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2007 IF_UNLOCK(&ifp->if_snd); 2008 ieee80211_runtask(ic, &sc->sc_start_task); 2009 } else 2010 IF_UNLOCK(&ifp->if_snd); 2011 } 2012 } else 2013 callout_stop(&sc->tx_timeout); 2014 WPI_TXQ_STATE_UNLOCK(sc); 2015 2016 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2017 } 2018 2019 /* 2020 * Process a "command done" firmware notification. This is where we wakeup 2021 * processes waiting for a synchronous command completion. 2022 */ 2023 static void 2024 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2025 { 2026 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2027 struct wpi_tx_data *data; 2028 2029 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2030 "type %s len %d\n", desc->qid, desc->idx, 2031 desc->flags, wpi_cmd_str(desc->type), 2032 le32toh(desc->len)); 2033 2034 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2035 return; /* Not a command ack. */ 2036 2037 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2038 2039 data = &ring->data[desc->idx]; 2040 2041 /* If the command was mapped in an mbuf, free it. */ 2042 if (data->m != NULL) { 2043 bus_dmamap_sync(ring->data_dmat, data->map, 2044 BUS_DMASYNC_POSTWRITE); 2045 bus_dmamap_unload(ring->data_dmat, data->map); 2046 m_freem(data->m); 2047 data->m = NULL; 2048 } 2049 2050 wakeup(&ring->cmd[desc->idx]); 2051 } 2052 2053 static void 2054 wpi_notif_intr(struct wpi_softc *sc) 2055 { 2056 struct ifnet *ifp = sc->sc_ifp; 2057 struct ieee80211com *ic = ifp->if_l2com; 2058 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2059 uint32_t hw; 2060 2061 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2062 BUS_DMASYNC_POSTREAD); 2063 2064 hw = le32toh(sc->shared->next); 2065 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2066 2067 while (sc->rxq.cur != hw) { 2068 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2069 2070 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2071 struct wpi_rx_desc *desc; 2072 2073 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2074 BUS_DMASYNC_POSTREAD); 2075 desc = mtod(data->m, struct wpi_rx_desc *); 2076 2077 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2078 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2079 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2080 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2081 2082 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2083 /* Reply to a command. */ 2084 wpi_cmd_done(sc, desc); 2085 } 2086 2087 switch (desc->type) { 2088 case WPI_RX_DONE: 2089 /* An 802.11 frame has been received. */ 2090 wpi_rx_done(sc, desc, data); 2091 2092 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2093 /* wpi_stop() was called. 
*/ 2094 return; 2095 } 2096 2097 break; 2098 2099 case WPI_TX_DONE: 2100 /* An 802.11 frame has been transmitted. */ 2101 wpi_tx_done(sc, desc); 2102 break; 2103 2104 case WPI_RX_STATISTICS: 2105 case WPI_BEACON_STATISTICS: 2106 wpi_rx_statistics(sc, desc, data); 2107 break; 2108 2109 case WPI_BEACON_MISSED: 2110 { 2111 struct wpi_beacon_missed *miss = 2112 (struct wpi_beacon_missed *)(desc + 1); 2113 uint32_t misses; 2114 2115 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2116 BUS_DMASYNC_POSTREAD); 2117 misses = le32toh(miss->consecutive); 2118 2119 DPRINTF(sc, WPI_DEBUG_STATE, 2120 "%s: beacons missed %d/%d\n", __func__, misses, 2121 le32toh(miss->total)); 2122 2123 if (vap->iv_state == IEEE80211_S_RUN && 2124 (ic->ic_flags & IEEE80211_F_SCAN) == 0 && 2125 misses >= vap->iv_bmissthreshold) 2126 ieee80211_beacon_miss(ic); 2127 2128 break; 2129 } 2130 case WPI_UC_READY: 2131 { 2132 struct wpi_ucode_info *uc = 2133 (struct wpi_ucode_info *)(desc + 1); 2134 2135 /* The microcontroller is ready. */ 2136 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2137 BUS_DMASYNC_POSTREAD); 2138 DPRINTF(sc, WPI_DEBUG_RESET, 2139 "microcode alive notification version=%d.%d " 2140 "subtype=%x alive=%x\n", uc->major, uc->minor, 2141 uc->subtype, le32toh(uc->valid)); 2142 2143 if (le32toh(uc->valid) != 1) { 2144 device_printf(sc->sc_dev, 2145 "microcontroller initialization failed\n"); 2146 wpi_stop_locked(sc); 2147 } 2148 /* Save the address of the error log in SRAM. */ 2149 sc->errptr = le32toh(uc->errptr); 2150 break; 2151 } 2152 case WPI_STATE_CHANGED: 2153 { 2154 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2155 BUS_DMASYNC_POSTREAD); 2156 2157 uint32_t *status = (uint32_t *)(desc + 1); 2158 2159 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2160 le32toh(*status)); 2161 2162 if (le32toh(*status) & 1) { 2163 WPI_NT_LOCK(sc); 2164 wpi_clear_node_table(sc); 2165 WPI_NT_UNLOCK(sc); 2166 ieee80211_runtask(ic, &sc->sc_radiooff_task); 2167 return; 2168 } 2169 break; 2170 } 2171 case WPI_START_SCAN: 2172 { 2173 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2174 BUS_DMASYNC_POSTREAD); 2175 #ifdef WPI_DEBUG 2176 struct wpi_start_scan *scan = 2177 (struct wpi_start_scan *)(desc + 1); 2178 DPRINTF(sc, WPI_DEBUG_SCAN, 2179 "%s: scanning channel %d status %x\n", 2180 __func__, scan->chan, le32toh(scan->status)); 2181 #endif 2182 break; 2183 } 2184 case WPI_STOP_SCAN: 2185 { 2186 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2187 BUS_DMASYNC_POSTREAD); 2188 #ifdef WPI_DEBUG 2189 struct wpi_stop_scan *scan = 2190 (struct wpi_stop_scan *)(desc + 1); 2191 DPRINTF(sc, WPI_DEBUG_SCAN, 2192 "scan finished nchan=%d status=%d chan=%d\n", 2193 scan->nchan, scan->status, scan->chan); 2194 #endif 2195 WPI_RXON_LOCK(sc); 2196 callout_stop(&sc->scan_timeout); 2197 WPI_RXON_UNLOCK(sc); 2198 ieee80211_scan_next(vap); 2199 break; 2200 } 2201 } 2202 2203 if (sc->rxq.cur % 8 == 0) { 2204 /* Tell the firmware what we have processed. */ 2205 wpi_update_rx_ring(sc); 2206 } 2207 } 2208 } 2209 2210 /* 2211 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2212 * from power-down sleep mode. 2213 */ 2214 static void 2215 wpi_wakeup_intr(struct wpi_softc *sc) 2216 { 2217 int qid; 2218 2219 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2220 "%s: ucode wakeup from power-down sleep\n", __func__); 2221 2222 /* Wakeup RX and TX rings. 
*/ 2223 if (sc->rxq.update) { 2224 sc->rxq.update = 0; 2225 wpi_update_rx_ring(sc); 2226 } 2227 WPI_TXQ_LOCK(sc); 2228 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2229 struct wpi_tx_ring *ring = &sc->txq[qid]; 2230 2231 if (ring->update) { 2232 ring->update = 0; 2233 wpi_update_tx_ring(sc, ring); 2234 } 2235 } 2236 WPI_TXQ_UNLOCK(sc); 2237 2238 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2239 } 2240 2241 /* 2242 * This function prints firmware registers 2243 */ 2244 #ifdef WPI_DEBUG 2245 static void 2246 wpi_debug_registers(struct wpi_softc *sc) 2247 { 2248 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 2249 int i; 2250 static const uint32_t csr_tbl[] = { 2251 WPI_HW_IF_CONFIG, 2252 WPI_INT, 2253 WPI_INT_MASK, 2254 WPI_FH_INT, 2255 WPI_GPIO_IN, 2256 WPI_RESET, 2257 WPI_GP_CNTRL, 2258 WPI_EEPROM, 2259 WPI_EEPROM_GP, 2260 WPI_GIO, 2261 WPI_UCODE_GP1, 2262 WPI_UCODE_GP2, 2263 WPI_GIO_CHICKEN, 2264 WPI_ANA_PLL, 2265 WPI_DBG_HPET_MEM, 2266 }; 2267 static const uint32_t prph_tbl[] = { 2268 WPI_APMG_CLK_CTRL, 2269 WPI_APMG_PS, 2270 WPI_APMG_PCI_STT, 2271 WPI_APMG_RFKILL, 2272 }; 2273 2274 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2275 2276 for (i = 0; i < COUNTOF(csr_tbl); i++) { 2277 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2278 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2279 2280 if ((i + 1) % 2 == 0) 2281 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2282 } 2283 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2284 2285 if (wpi_nic_lock(sc) == 0) { 2286 for (i = 0; i < COUNTOF(prph_tbl); i++) { 2287 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2288 wpi_get_prph_string(prph_tbl[i]), 2289 wpi_prph_read(sc, prph_tbl[i])); 2290 2291 if ((i + 1) % 2 == 0) 2292 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2293 } 2294 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2295 wpi_nic_unlock(sc); 2296 } else { 2297 DPRINTF(sc, WPI_DEBUG_REGISTER, 2298 "Cannot access internal registers.\n"); 2299 } 2300 #undef COUNTOF 2301 } 2302 #endif 2303 2304 /* 2305 * Dump the error log of the firmware when a firmware panic occurs. Although 2306 * we can't debug the firmware because it is neither open source nor free, it 2307 * can help us to identify certain classes of problems. 2308 */ 2309 static void 2310 wpi_fatal_intr(struct wpi_softc *sc) 2311 { 2312 struct wpi_fw_dump dump; 2313 uint32_t i, offset, count; 2314 const uint32_t size_errmsg = 2315 (sizeof (wpi_fw_errmsg) / sizeof ((wpi_fw_errmsg)[0])); 2316 2317 /* Check that the error log address is valid. */ 2318 if (sc->errptr < WPI_FW_DATA_BASE || 2319 sc->errptr + sizeof (dump) > 2320 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2321 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2322 sc->errptr); 2323 return; 2324 } 2325 if (wpi_nic_lock(sc) != 0) { 2326 printf("%s: could not read firmware error log\n", __func__); 2327 return; 2328 } 2329 /* Read number of entries in the log. */ 2330 count = wpi_mem_read(sc, sc->errptr); 2331 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2332 printf("%s: invalid count field (count = %u)\n", __func__, 2333 count); 2334 wpi_nic_unlock(sc); 2335 return; 2336 } 2337 /* Skip "count" field. */ 2338 offset = sc->errptr + sizeof (uint32_t); 2339 printf("firmware error log (count = %u):\n", count); 2340 for (i = 0; i < count; i++) { 2341 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2342 sizeof (dump) / sizeof (uint32_t)); 2343 2344 printf(" error type = \"%s\" (0x%08X)\n", 2345 (dump.desc < size_errmsg) ? 
2346 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2347 dump.desc); 2348 printf(" error data = 0x%08X\n", 2349 dump.data); 2350 printf(" branch link = 0x%08X%08X\n", 2351 dump.blink[0], dump.blink[1]); 2352 printf(" interrupt link = 0x%08X%08X\n", 2353 dump.ilink[0], dump.ilink[1]); 2354 printf(" time = %u\n", dump.time); 2355 2356 offset += sizeof (dump); 2357 } 2358 wpi_nic_unlock(sc); 2359 /* Dump driver status (TX and RX rings) while we're here. */ 2360 printf("driver status:\n"); 2361 WPI_TXQ_LOCK(sc); 2362 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2363 struct wpi_tx_ring *ring = &sc->txq[i]; 2364 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2365 i, ring->qid, ring->cur, ring->queued); 2366 } 2367 WPI_TXQ_UNLOCK(sc); 2368 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2369 } 2370 2371 static void 2372 wpi_intr(void *arg) 2373 { 2374 struct wpi_softc *sc = arg; 2375 struct ifnet *ifp = sc->sc_ifp; 2376 uint32_t r1, r2; 2377 2378 WPI_LOCK(sc); 2379 2380 /* Disable interrupts. */ 2381 WPI_WRITE(sc, WPI_INT_MASK, 0); 2382 2383 r1 = WPI_READ(sc, WPI_INT); 2384 2385 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2386 goto end; /* Hardware gone! */ 2387 2388 r2 = WPI_READ(sc, WPI_FH_INT); 2389 2390 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2391 r1, r2); 2392 2393 if (r1 == 0 && r2 == 0) 2394 goto done; /* Interrupt not for us. */ 2395 2396 /* Acknowledge interrupts. */ 2397 WPI_WRITE(sc, WPI_INT, r1); 2398 WPI_WRITE(sc, WPI_FH_INT, r2); 2399 2400 if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) { 2401 device_printf(sc->sc_dev, "fatal firmware error\n"); 2402 #ifdef WPI_DEBUG 2403 wpi_debug_registers(sc); 2404 #endif 2405 wpi_fatal_intr(sc); 2406 DPRINTF(sc, WPI_DEBUG_HW, 2407 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2408 "(Hardware Error)"); 2409 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2410 goto end; 2411 } 2412 2413 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2414 (r2 & WPI_FH_INT_RX)) 2415 wpi_notif_intr(sc); 2416 2417 if (r1 & WPI_INT_ALIVE) 2418 wakeup(sc); /* Firmware is alive. */ 2419 2420 if (r1 & WPI_INT_WAKEUP) 2421 wpi_wakeup_intr(sc); 2422 2423 done: 2424 /* Re-enable interrupts. */ 2425 if (ifp->if_flags & IFF_UP) 2426 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2427 2428 end: WPI_UNLOCK(sc); 2429 } 2430 2431 static int 2432 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2433 { 2434 struct ifnet *ifp = sc->sc_ifp; 2435 struct ieee80211_frame *wh; 2436 struct wpi_tx_cmd *cmd; 2437 struct wpi_tx_data *data; 2438 struct wpi_tx_desc *desc; 2439 struct wpi_tx_ring *ring; 2440 struct mbuf *m1; 2441 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2442 int error, i, hdrlen, nsegs, totlen, pad; 2443 2444 WPI_TXQ_LOCK(sc); 2445 2446 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2447 2448 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2449 2450 if (sc->txq_active == 0) { 2451 /* wpi_stop() was called */ 2452 error = ENETDOWN; 2453 goto fail; 2454 } 2455 2456 wh = mtod(buf->m, struct ieee80211_frame *); 2457 hdrlen = ieee80211_anyhdrsize(wh); 2458 totlen = buf->m->m_pkthdr.len; 2459 2460 if (hdrlen & 3) { 2461 /* First segment length must be a multiple of 4. */ 2462 pad = 4 - (hdrlen & 3); 2463 } else 2464 pad = 0; 2465 2466 ring = &sc->txq[buf->ac]; 2467 desc = &ring->desc[ring->cur]; 2468 data = &ring->data[ring->cur]; 2469 2470 /* Prepare TX firmware command. 
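 * The command, the per-frame parameters from buf->data and the (padded)
 * 802.11 header all go into the first DMA segment; the frame payload
 * follows in up to WPI_MAX_SCATTER - 1 additional segments.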
*/ 2471 cmd = &ring->cmd[ring->cur]; 2472 cmd->code = buf->code; 2473 cmd->flags = 0; 2474 cmd->qid = ring->qid; 2475 cmd->idx = ring->cur; 2476 2477 memcpy(cmd->data, buf->data, buf->size); 2478 2479 /* Save and trim IEEE802.11 header. */ 2480 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2481 m_adj(buf->m, hdrlen); 2482 2483 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2484 segs, &nsegs, BUS_DMA_NOWAIT); 2485 if (error != 0 && error != EFBIG) { 2486 device_printf(sc->sc_dev, 2487 "%s: can't map mbuf (error %d)\n", __func__, error); 2488 goto fail; 2489 } 2490 if (error != 0) { 2491 /* Too many DMA segments, linearize mbuf. */ 2492 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2493 if (m1 == NULL) { 2494 device_printf(sc->sc_dev, 2495 "%s: could not defrag mbuf\n", __func__); 2496 error = ENOBUFS; 2497 goto fail; 2498 } 2499 buf->m = m1; 2500 2501 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2502 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2503 if (error != 0) { 2504 device_printf(sc->sc_dev, 2505 "%s: can't map mbuf (error %d)\n", __func__, 2506 error); 2507 goto fail; 2508 } 2509 } 2510 2511 KASSERT(nsegs < WPI_MAX_SCATTER, 2512 ("too many DMA segments, nsegs (%d) should be less than %d", 2513 nsegs, WPI_MAX_SCATTER)); 2514 2515 data->m = buf->m; 2516 data->ni = buf->ni; 2517 2518 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2519 __func__, ring->qid, ring->cur, totlen, nsegs); 2520 2521 /* Fill TX descriptor. */ 2522 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2523 /* First DMA segment is used by the TX command. */ 2524 desc->segs[0].addr = htole32(data->cmd_paddr); 2525 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2526 /* Other DMA segments are for data payload. */ 2527 seg = &segs[0]; 2528 for (i = 1; i <= nsegs; i++) { 2529 desc->segs[i].addr = htole32(seg->ds_addr); 2530 desc->segs[i].len = htole32(seg->ds_len); 2531 seg++; 2532 } 2533 2534 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2535 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2536 BUS_DMASYNC_PREWRITE); 2537 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2538 BUS_DMASYNC_PREWRITE); 2539 2540 /* Kick TX ring. */ 2541 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2542 wpi_update_tx_ring(sc, ring); 2543 2544 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2545 /* Mark TX ring as full if we reach a certain threshold. */ 2546 WPI_TXQ_STATE_LOCK(sc); 2547 if (++ring->queued > WPI_TX_RING_HIMARK) { 2548 sc->qfullmsk |= 1 << ring->qid; 2549 2550 IF_LOCK(&ifp->if_snd); 2551 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2552 IF_UNLOCK(&ifp->if_snd); 2553 } 2554 2555 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2556 WPI_TXQ_STATE_UNLOCK(sc); 2557 } 2558 2559 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2560 2561 WPI_TXQ_UNLOCK(sc); 2562 2563 return 0; 2564 2565 fail: m_freem(buf->m); 2566 2567 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2568 2569 WPI_TXQ_UNLOCK(sc); 2570 2571 return error; 2572 } 2573 2574 /* 2575 * Construct the data packet for a transmit buffer. 
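 * This fills in a wpi_cmd_data structure (rate, flags, node id and crypto
 * material) from the 802.11 frame and hands it to wpi_cmd2() for queueing
 * on the selected TX ring.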
2576 */ 2577 static int 2578 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2579 { 2580 const struct ieee80211_txparam *tp; 2581 struct ieee80211vap *vap = ni->ni_vap; 2582 struct ieee80211com *ic = ni->ni_ic; 2583 struct wpi_node *wn = WPI_NODE(ni); 2584 struct ieee80211_channel *chan; 2585 struct ieee80211_frame *wh; 2586 struct ieee80211_key *k = NULL; 2587 struct wpi_buf tx_data; 2588 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2589 uint32_t flags; 2590 uint16_t qos; 2591 uint8_t tid, type; 2592 int ac, error, swcrypt, rate, ismcast, totlen; 2593 2594 wh = mtod(m, struct ieee80211_frame *); 2595 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2596 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2597 2598 /* Select EDCA Access Category and TX ring for this frame. */ 2599 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2600 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2601 tid = qos & IEEE80211_QOS_TID; 2602 } else { 2603 qos = 0; 2604 tid = 0; 2605 } 2606 ac = M_WME_GETAC(m); 2607 2608 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2609 ni->ni_chan : ic->ic_curchan; 2610 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2611 2612 /* Choose a TX rate index. */ 2613 if (type == IEEE80211_FC0_TYPE_MGT) 2614 rate = tp->mgmtrate; 2615 else if (ismcast) 2616 rate = tp->mcastrate; 2617 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2618 rate = tp->ucastrate; 2619 else if (m->m_flags & M_EAPOL) 2620 rate = tp->mgmtrate; 2621 else { 2622 /* XXX pass pktlen */ 2623 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2624 rate = ni->ni_txrate; 2625 } 2626 2627 /* Encrypt the frame if need be. */ 2628 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2629 /* Retrieve key for TX. */ 2630 k = ieee80211_crypto_encap(ni, m); 2631 if (k == NULL) { 2632 error = ENOBUFS; 2633 goto fail; 2634 } 2635 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2636 2637 /* 802.11 header may have moved. */ 2638 wh = mtod(m, struct ieee80211_frame *); 2639 } 2640 totlen = m->m_pkthdr.len; 2641 2642 if (ieee80211_radiotap_active_vap(vap)) { 2643 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2644 2645 tap->wt_flags = 0; 2646 tap->wt_rate = rate; 2647 if (k != NULL) 2648 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2649 2650 ieee80211_radiotap_tx(vap, m); 2651 } 2652 2653 flags = 0; 2654 if (!ismcast) { 2655 /* Unicast frame, check if an ACK is expected. */ 2656 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2657 IEEE80211_QOS_ACKPOLICY_NOACK) 2658 flags |= WPI_TX_NEED_ACK; 2659 } 2660 2661 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2662 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2663 2664 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2665 if (!ismcast) { 2666 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2667 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2668 flags |= WPI_TX_NEED_RTS; 2669 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2670 WPI_RATE_IS_OFDM(rate)) { 2671 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2672 flags |= WPI_TX_NEED_CTS; 2673 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2674 flags |= WPI_TX_NEED_RTS; 2675 } 2676 2677 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2678 flags |= WPI_TX_FULL_TXOP; 2679 } 2680 2681 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2682 if (type == IEEE80211_FC0_TYPE_MGT) { 2683 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2684 2685 /* Tell HW to set timestamp in probe responses. 
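 * (Re)association requests are also given a slightly larger timeout value
 * (3 instead of 2) than other management frames.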
*/ 2686 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2687 flags |= WPI_TX_INSERT_TSTAMP; 2688 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2689 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2690 tx->timeout = htole16(3); 2691 else 2692 tx->timeout = htole16(2); 2693 } 2694 2695 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2696 tx->id = WPI_ID_BROADCAST; 2697 else { 2698 if (wn->id == WPI_ID_UNDEFINED) { 2699 device_printf(sc->sc_dev, 2700 "%s: undefined node id\n", __func__); 2701 error = EINVAL; 2702 goto fail; 2703 } 2704 2705 tx->id = wn->id; 2706 } 2707 2708 if (type != IEEE80211_FC0_TYPE_MGT) 2709 tx->data_ntries = tp->maxretry; 2710 2711 if (k != NULL && !swcrypt) { 2712 switch (k->wk_cipher->ic_cipher) { 2713 case IEEE80211_CIPHER_AES_CCM: 2714 tx->security = WPI_CIPHER_CCMP; 2715 break; 2716 2717 default: 2718 break; 2719 } 2720 2721 memcpy(tx->key, k->wk_key, k->wk_keylen); 2722 } 2723 2724 tx->len = htole16(totlen); 2725 tx->flags = htole32(flags); 2726 tx->plcp = rate2plcp(rate); 2727 tx->tid = tid; 2728 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2729 tx->ofdm_mask = 0xff; 2730 tx->cck_mask = 0x0f; 2731 tx->rts_ntries = 7; 2732 2733 tx_data.ni = ni; 2734 tx_data.m = m; 2735 tx_data.size = sizeof(struct wpi_cmd_data); 2736 tx_data.code = WPI_CMD_TX_DATA; 2737 tx_data.ac = ac; 2738 2739 return wpi_cmd2(sc, &tx_data); 2740 2741 fail: m_freem(m); 2742 return error; 2743 } 2744 2745 static int 2746 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2747 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2748 { 2749 struct ieee80211vap *vap = ni->ni_vap; 2750 struct ieee80211_key *k = NULL; 2751 struct ieee80211_frame *wh; 2752 struct wpi_buf tx_data; 2753 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2754 uint32_t flags; 2755 uint8_t type; 2756 int ac, rate, swcrypt, totlen; 2757 2758 wh = mtod(m, struct ieee80211_frame *); 2759 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2760 2761 ac = params->ibp_pri & 3; 2762 2763 /* Choose a TX rate index. */ 2764 rate = params->ibp_rate0; 2765 2766 flags = 0; 2767 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2768 flags |= WPI_TX_NEED_ACK; 2769 if (params->ibp_flags & IEEE80211_BPF_RTS) 2770 flags |= WPI_TX_NEED_RTS; 2771 if (params->ibp_flags & IEEE80211_BPF_CTS) 2772 flags |= WPI_TX_NEED_CTS; 2773 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2774 flags |= WPI_TX_FULL_TXOP; 2775 2776 /* Encrypt the frame if need be. */ 2777 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2778 /* Retrieve key for TX. */ 2779 k = ieee80211_crypto_encap(ni, m); 2780 if (k == NULL) { 2781 m_freem(m); 2782 return ENOBUFS; 2783 } 2784 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2785 2786 /* 802.11 header may have moved. */ 2787 wh = mtod(m, struct ieee80211_frame *); 2788 } 2789 totlen = m->m_pkthdr.len; 2790 2791 if (ieee80211_radiotap_active_vap(vap)) { 2792 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2793 2794 tap->wt_flags = 0; 2795 tap->wt_rate = rate; 2796 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2797 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2798 2799 ieee80211_radiotap_tx(vap, m); 2800 } 2801 2802 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2803 if (type == IEEE80211_FC0_TYPE_MGT) { 2804 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2805 2806 /* Tell HW to set timestamp in probe responses. 
*/ 2807 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2808 flags |= WPI_TX_INSERT_TSTAMP; 2809 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2810 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2811 tx->timeout = htole16(3); 2812 else 2813 tx->timeout = htole16(2); 2814 } 2815 2816 if (k != NULL && !swcrypt) { 2817 switch (k->wk_cipher->ic_cipher) { 2818 case IEEE80211_CIPHER_AES_CCM: 2819 tx->security = WPI_CIPHER_CCMP; 2820 break; 2821 2822 default: 2823 break; 2824 } 2825 2826 memcpy(tx->key, k->wk_key, k->wk_keylen); 2827 } 2828 2829 tx->len = htole16(totlen); 2830 tx->flags = htole32(flags); 2831 tx->plcp = rate2plcp(rate); 2832 tx->id = WPI_ID_BROADCAST; 2833 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2834 tx->rts_ntries = params->ibp_try1; 2835 tx->data_ntries = params->ibp_try0; 2836 2837 tx_data.ni = ni; 2838 tx_data.m = m; 2839 tx_data.size = sizeof(struct wpi_cmd_data); 2840 tx_data.code = WPI_CMD_TX_DATA; 2841 tx_data.ac = ac; 2842 2843 return wpi_cmd2(sc, &tx_data); 2844 } 2845 2846 static int 2847 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2848 const struct ieee80211_bpf_params *params) 2849 { 2850 struct ieee80211com *ic = ni->ni_ic; 2851 struct ifnet *ifp = ic->ic_ifp; 2852 struct wpi_softc *sc = ifp->if_softc; 2853 int error = 0; 2854 2855 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2856 2857 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2858 ieee80211_free_node(ni); 2859 m_freem(m); 2860 return ENETDOWN; 2861 } 2862 2863 WPI_TX_LOCK(sc); 2864 if (params == NULL) { 2865 /* 2866 * Legacy path; interpret frame contents to decide 2867 * precisely how to send the frame. 2868 */ 2869 error = wpi_tx_data(sc, m, ni); 2870 } else { 2871 /* 2872 * Caller supplied explicit parameters to use in 2873 * sending the frame. 
2874 */ 2875 error = wpi_tx_data_raw(sc, m, ni, params); 2876 } 2877 WPI_TX_UNLOCK(sc); 2878 2879 if (error != 0) { 2880 /* NB: m is reclaimed on tx failure */ 2881 ieee80211_free_node(ni); 2882 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2883 2884 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2885 2886 return error; 2887 } 2888 2889 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2890 2891 return 0; 2892 } 2893 2894 /** 2895 * Process data waiting to be sent on the IFNET output queue 2896 */ 2897 static void 2898 wpi_start(struct ifnet *ifp) 2899 { 2900 struct wpi_softc *sc = ifp->if_softc; 2901 struct ieee80211_node *ni; 2902 struct mbuf *m; 2903 2904 WPI_TX_LOCK(sc); 2905 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 2906 2907 for (;;) { 2908 IF_LOCK(&ifp->if_snd); 2909 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2910 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2911 IF_UNLOCK(&ifp->if_snd); 2912 break; 2913 } 2914 IF_UNLOCK(&ifp->if_snd); 2915 2916 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 2917 if (m == NULL) 2918 break; 2919 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 2920 if (wpi_tx_data(sc, m, ni) != 0) { 2921 ieee80211_free_node(ni); 2922 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2923 } 2924 } 2925 2926 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 2927 WPI_TX_UNLOCK(sc); 2928 } 2929 2930 static void 2931 wpi_start_task(void *arg0, int pending) 2932 { 2933 struct wpi_softc *sc = arg0; 2934 struct ifnet *ifp = sc->sc_ifp; 2935 2936 wpi_start(ifp); 2937 } 2938 2939 static void 2940 wpi_watchdog_rfkill(void *arg) 2941 { 2942 struct wpi_softc *sc = arg; 2943 struct ifnet *ifp = sc->sc_ifp; 2944 struct ieee80211com *ic = ifp->if_l2com; 2945 2946 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 2947 2948 /* No need to lock firmware memory. */ 2949 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 2950 /* Radio kill switch is still off. */ 2951 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 2952 sc); 2953 } else 2954 ieee80211_runtask(ic, &sc->sc_radioon_task); 2955 } 2956 2957 static void 2958 wpi_scan_timeout(void *arg) 2959 { 2960 struct wpi_softc *sc = arg; 2961 struct ifnet *ifp = sc->sc_ifp; 2962 2963 if_printf(ifp, "scan timeout\n"); 2964 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2965 } 2966 2967 static void 2968 wpi_tx_timeout(void *arg) 2969 { 2970 struct wpi_softc *sc = arg; 2971 struct ifnet *ifp = sc->sc_ifp; 2972 2973 if_printf(ifp, "device timeout\n"); 2974 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2975 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2976 } 2977 2978 static int 2979 wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2980 { 2981 struct wpi_softc *sc = ifp->if_softc; 2982 struct ieee80211com *ic = ifp->if_l2com; 2983 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2984 struct ifreq *ifr = (struct ifreq *) data; 2985 int error = 0; 2986 2987 switch (cmd) { 2988 case SIOCGIFADDR: 2989 error = ether_ioctl(ifp, cmd, data); 2990 break; 2991 case SIOCSIFFLAGS: 2992 if (ifp->if_flags & IFF_UP) { 2993 wpi_init(sc); 2994 2995 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 && 2996 vap != NULL) 2997 ieee80211_stop(vap); 2998 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2999 wpi_stop(sc); 3000 break; 3001 case SIOCGIFMEDIA: 3002 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3003 break; 3004 default: 3005 error = EINVAL; 3006 break; 3007 } 3008 return error; 3009 } 3010 3011 /* 3012 * Send a command to the firmware. 
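 * Commands that do not fit in the ring slot's inline buffer are placed in
 * a temporary mbuf instead.  For synchronous commands (async == 0) the
 * caller sleeps, for at most one second, until wpi_cmd_done() wakes it up.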
3013 */ 3014 static int 3015 wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3016 int async) 3017 { 3018 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3019 struct wpi_tx_desc *desc; 3020 struct wpi_tx_data *data; 3021 struct wpi_tx_cmd *cmd; 3022 struct mbuf *m; 3023 bus_addr_t paddr; 3024 int totlen, error; 3025 3026 WPI_TXQ_LOCK(sc); 3027 3028 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3029 3030 if (sc->txq_active == 0) { 3031 /* wpi_stop() was called */ 3032 error = 0; 3033 goto fail; 3034 } 3035 3036 if (async == 0) 3037 WPI_LOCK_ASSERT(sc); 3038 3039 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3040 __func__, wpi_cmd_str(code), size, async); 3041 3042 desc = &ring->desc[ring->cur]; 3043 data = &ring->data[ring->cur]; 3044 totlen = 4 + size; 3045 3046 if (size > sizeof cmd->data) { 3047 /* Command is too large to fit in a descriptor. */ 3048 if (totlen > MCLBYTES) { 3049 error = EINVAL; 3050 goto fail; 3051 } 3052 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3053 if (m == NULL) { 3054 error = ENOMEM; 3055 goto fail; 3056 } 3057 cmd = mtod(m, struct wpi_tx_cmd *); 3058 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3059 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3060 if (error != 0) { 3061 m_freem(m); 3062 goto fail; 3063 } 3064 data->m = m; 3065 } else { 3066 cmd = &ring->cmd[ring->cur]; 3067 paddr = data->cmd_paddr; 3068 } 3069 3070 cmd->code = code; 3071 cmd->flags = 0; 3072 cmd->qid = ring->qid; 3073 cmd->idx = ring->cur; 3074 memcpy(cmd->data, buf, size); 3075 3076 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3077 desc->segs[0].addr = htole32(paddr); 3078 desc->segs[0].len = htole32(totlen); 3079 3080 if (size > sizeof cmd->data) { 3081 bus_dmamap_sync(ring->data_dmat, data->map, 3082 BUS_DMASYNC_PREWRITE); 3083 } else { 3084 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3085 BUS_DMASYNC_PREWRITE); 3086 } 3087 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3088 BUS_DMASYNC_PREWRITE); 3089 3090 /* Kick command ring. */ 3091 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3092 wpi_update_tx_ring(sc, ring); 3093 3094 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3095 3096 WPI_TXQ_UNLOCK(sc); 3097 3098 if (async) 3099 return 0; 3100 3101 return mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3102 3103 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3104 3105 WPI_TXQ_UNLOCK(sc); 3106 3107 return error; 3108 } 3109 3110 /* 3111 * Configure HW multi-rate retries. 3112 */ 3113 static int 3114 wpi_mrr_setup(struct wpi_softc *sc) 3115 { 3116 struct ifnet *ifp = sc->sc_ifp; 3117 struct ieee80211com *ic = ifp->if_l2com; 3118 struct wpi_mrr_setup mrr; 3119 int i, error; 3120 3121 /* CCK rates (not used with 802.11a). */ 3122 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3123 mrr.rates[i].flags = 0; 3124 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3125 /* Fallback to the immediate lower CCK rate (if any.) */ 3126 mrr.rates[i].next = 3127 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3128 /* Try one time at this rate before falling back to "next". */ 3129 mrr.rates[i].ntries = 1; 3130 } 3131 /* OFDM rates (not used with 802.11b). */ 3132 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3133 mrr.rates[i].flags = 0; 3134 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3135 /* Fallback to the immediate lower rate (if any.) */ 3136 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3137 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 
3138 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3139 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3140 i - 1; 3141 /* Try one time at this rate before falling back to "next". */ 3142 mrr.rates[i].ntries = 1; 3143 } 3144 /* Setup MRR for control frames. */ 3145 mrr.which = htole32(WPI_MRR_CTL); 3146 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3147 if (error != 0) { 3148 device_printf(sc->sc_dev, 3149 "could not setup MRR for control frames\n"); 3150 return error; 3151 } 3152 /* Setup MRR for data frames. */ 3153 mrr.which = htole32(WPI_MRR_DATA); 3154 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3155 if (error != 0) { 3156 device_printf(sc->sc_dev, 3157 "could not setup MRR for data frames\n"); 3158 return error; 3159 } 3160 return 0; 3161 } 3162 3163 static int 3164 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3165 { 3166 struct ieee80211com *ic = ni->ni_ic; 3167 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3168 struct wpi_node *wn = WPI_NODE(ni); 3169 struct wpi_node_info node; 3170 int error; 3171 3172 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3173 3174 if (wn->id == WPI_ID_UNDEFINED) 3175 return EINVAL; 3176 3177 memset(&node, 0, sizeof node); 3178 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3179 node.id = wn->id; 3180 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3181 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3182 node.action = htole32(WPI_ACTION_SET_RATE); 3183 node.antenna = WPI_ANTENNA_BOTH; 3184 3185 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3186 wn->id, ether_sprintf(ni->ni_macaddr)); 3187 3188 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3189 if (error != 0) { 3190 device_printf(sc->sc_dev, 3191 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3192 error); 3193 return error; 3194 } 3195 3196 if (wvp->wv_gtk != 0) { 3197 error = wpi_set_global_keys(ni); 3198 if (error != 0) { 3199 device_printf(sc->sc_dev, 3200 "%s: error while setting global keys\n", __func__); 3201 return ENXIO; 3202 } 3203 } 3204 3205 return 0; 3206 } 3207 3208 /* 3209 * Broadcast node is used to send group-addressed and management frames. 3210 */ 3211 static int 3212 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3213 { 3214 struct ifnet *ifp = sc->sc_ifp; 3215 struct ieee80211com *ic = ifp->if_l2com; 3216 struct wpi_node_info node; 3217 3218 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3219 3220 memset(&node, 0, sizeof node); 3221 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3222 node.id = WPI_ID_BROADCAST; 3223 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3224 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3225 node.action = htole32(WPI_ACTION_SET_RATE); 3226 node.antenna = WPI_ANTENNA_BOTH; 3227 3228 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3229 3230 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3231 } 3232 3233 static int 3234 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3235 { 3236 struct wpi_node *wn = WPI_NODE(ni); 3237 int error; 3238 3239 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3240 3241 wn->id = wpi_add_node_entry_sta(sc); 3242 3243 if ((error = wpi_add_node(sc, ni)) != 0) { 3244 wpi_del_node_entry(sc, wn->id); 3245 wn->id = WPI_ID_UNDEFINED; 3246 return error; 3247 } 3248 3249 return 0; 3250 } 3251 3252 static int 3253 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3254 { 3255 struct wpi_node *wn = WPI_NODE(ni); 3256 int error; 3257 3258 KASSERT(wn->id == WPI_ID_UNDEFINED, 3259 ("the node %d was added before", wn->id)); 3260 3261 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3262 3263 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3264 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3265 return ENOMEM; 3266 } 3267 3268 if ((error = wpi_add_node(sc, ni)) != 0) { 3269 wpi_del_node_entry(sc, wn->id); 3270 wn->id = WPI_ID_UNDEFINED; 3271 return error; 3272 } 3273 3274 return 0; 3275 } 3276 3277 static void 3278 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3279 { 3280 struct wpi_node *wn = WPI_NODE(ni); 3281 struct wpi_cmd_del_node node; 3282 int error; 3283 3284 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3285 3286 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3287 3288 memset(&node, 0, sizeof node); 3289 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3290 node.count = 1; 3291 3292 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3293 wn->id, ether_sprintf(ni->ni_macaddr)); 3294 3295 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3296 if (error != 0) { 3297 device_printf(sc->sc_dev, 3298 "%s: could not delete node %u, error %d\n", __func__, 3299 wn->id, error); 3300 } 3301 } 3302 3303 static int 3304 wpi_updateedca(struct ieee80211com *ic) 3305 { 3306 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3307 struct wpi_softc *sc = ic->ic_ifp->if_softc; 3308 struct wpi_edca_params cmd; 3309 int aci, error; 3310 3311 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3312 3313 memset(&cmd, 0, sizeof cmd); 3314 cmd.flags = htole32(WPI_EDCA_UPDATE); 3315 for (aci = 0; aci < WME_NUM_AC; aci++) { 3316 const struct wmeParams *ac = 3317 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3318 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3319 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3320 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3321 cmd.ac[aci].txoplimit = 3322 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3323 3324 DPRINTF(sc, WPI_DEBUG_EDCA, 3325 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3326 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3327 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3328 cmd.ac[aci].txoplimit); 3329 } 3330 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3331 3332 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3333 3334 return error; 3335 #undef WPI_EXP2 3336 } 3337 3338 static void 3339 wpi_set_promisc(struct wpi_softc *sc) 3340 { 3341 struct ifnet *ifp = sc->sc_ifp; 3342 struct ieee80211com *ic = 
ifp->if_l2com; 3343 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3344 uint32_t promisc_filter; 3345 3346 promisc_filter = WPI_FILTER_CTL; 3347 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3348 promisc_filter |= WPI_FILTER_PROMISC; 3349 3350 if (ifp->if_flags & IFF_PROMISC) 3351 sc->rxon.filter |= htole32(promisc_filter); 3352 else 3353 sc->rxon.filter &= ~htole32(promisc_filter); 3354 } 3355 3356 static void 3357 wpi_update_promisc(struct ifnet *ifp) 3358 { 3359 struct wpi_softc *sc = ifp->if_softc; 3360 3361 WPI_RXON_LOCK(sc); 3362 wpi_set_promisc(sc); 3363 3364 if (wpi_send_rxon(sc, 1, 1) != 0) { 3365 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3366 __func__); 3367 } 3368 WPI_RXON_UNLOCK(sc); 3369 } 3370 3371 static void 3372 wpi_update_mcast(struct ifnet *ifp) 3373 { 3374 /* Ignore */ 3375 } 3376 3377 static void 3378 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3379 { 3380 struct wpi_cmd_led led; 3381 3382 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3383 3384 led.which = which; 3385 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3386 led.off = off; 3387 led.on = on; 3388 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3389 } 3390 3391 static int 3392 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3393 { 3394 struct wpi_cmd_timing cmd; 3395 uint64_t val, mod; 3396 3397 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3398 3399 memset(&cmd, 0, sizeof cmd); 3400 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3401 cmd.bintval = htole16(ni->ni_intval); 3402 cmd.lintval = htole16(10); 3403 3404 /* Compute remaining time until next beacon. */ 3405 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3406 mod = le64toh(cmd.tstamp) % val; 3407 cmd.binitval = htole32((uint32_t)(val - mod)); 3408 3409 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3410 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3411 3412 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3413 } 3414 3415 /* 3416 * This function is called periodically (every 60 seconds) to adjust output 3417 * power to temperature changes. 3418 */ 3419 static void 3420 wpi_power_calibration(struct wpi_softc *sc) 3421 { 3422 int temp; 3423 3424 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3425 3426 /* Update sensor data. */ 3427 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3428 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3429 3430 /* Sanity-check read value. */ 3431 if (temp < -260 || temp > 25) { 3432 /* This can't be correct, ignore. */ 3433 DPRINTF(sc, WPI_DEBUG_TEMP, 3434 "out-of-range temperature reported: %d\n", temp); 3435 return; 3436 } 3437 3438 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3439 3440 /* Adjust Tx power if need be. */ 3441 if (abs(temp - sc->temp) <= 6) 3442 return; 3443 3444 sc->temp = temp; 3445 3446 if (wpi_set_txpower(sc, 1) != 0) { 3447 /* just warn, too bad for the automatic calibration... */ 3448 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3449 } 3450 } 3451 3452 /* 3453 * Set TX power for current channel. 3454 */ 3455 static int 3456 wpi_set_txpower(struct wpi_softc *sc, int async) 3457 { 3458 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3459 struct ieee80211_channel *ch; 3460 struct wpi_power_group *group; 3461 struct wpi_cmd_txpower cmd; 3462 uint8_t chan; 3463 int idx, i; 3464 3465 /* Retrieve current channel from last RXON. 
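 * NB: sc->rxon.chan holds the IEEE channel number programmed by the last
 * RXON command.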
*/ 3466 chan = sc->rxon.chan; 3467 ch = &ic->ic_channels[chan]; 3468 3469 /* Find the TX power group to which this channel belongs. */ 3470 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3471 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3472 if (chan <= group->chan) 3473 break; 3474 } else 3475 group = &sc->groups[0]; 3476 3477 memset(&cmd, 0, sizeof cmd); 3478 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3479 cmd.chan = htole16(chan); 3480 3481 /* Set TX power for all OFDM and CCK rates. */ 3482 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3483 /* Retrieve TX power for this channel/rate. */ 3484 idx = wpi_get_power_index(sc, group, ch, i); 3485 3486 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3487 3488 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3489 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3490 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3491 } else { 3492 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3493 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3494 } 3495 DPRINTF(sc, WPI_DEBUG_TEMP, 3496 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3497 } 3498 3499 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3500 } 3501 3502 /* 3503 * Determine Tx power index for a given channel/rate combination. 3504 * This takes into account the regulatory information from EEPROM and the 3505 * current temperature. 3506 */ 3507 static int 3508 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3509 struct ieee80211_channel *c, int ridx) 3510 { 3511 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3512 #define fdivround(a, b, n) \ 3513 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3514 3515 /* Linear interpolation. */ 3516 #define interpolate(x, x1, y1, x2, y2, n) \ 3517 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3518 3519 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3520 struct wpi_power_sample *sample; 3521 int pwr, idx; 3522 u_int chan; 3523 3524 /* Get channel number. */ 3525 chan = ieee80211_chan2ieee(ic, c); 3526 3527 /* Default TX power is group maximum TX power minus 3dB. */ 3528 pwr = group->maxpwr / 2; 3529 3530 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3531 switch (ridx) { 3532 case WPI_RIDX_OFDM36: 3533 pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 0 : 5; 3534 break; 3535 case WPI_RIDX_OFDM48: 3536 pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 7 : 10; 3537 break; 3538 case WPI_RIDX_OFDM54: 3539 pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 9 : 12; 3540 break; 3541 } 3542 3543 /* Never exceed the channel maximum allowed TX power. */ 3544 pwr = min(pwr, sc->maxpwr[chan]); 3545 3546 /* Retrieve TX power index into gain tables from samples. */ 3547 for (sample = group->samples; sample < &group->samples[3]; sample++) 3548 if (pwr > sample[1].power) 3549 break; 3550 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3551 idx = interpolate(pwr, sample[0].power, sample[0].index, 3552 sample[1].power, sample[1].index, 19); 3553 3554 /*- 3555 * Adjust power index based on current temperature: 3556 * - if cooler than factory-calibrated: decrease output power 3557 * - if warmer than factory-calibrated: increase output power 3558 */ 3559 idx -= (sc->temp - group->temp) * 11 / 100; 3560 3561 /* Decrease TX power for CCK rates (-5dB). */ 3562 if (ridx >= WPI_RIDX_CCK1) 3563 idx += 10; 3564 3565 /* Make sure idx stays in a valid range. 
*/ 3566 if (idx < 0) 3567 return 0; 3568 if (idx > WPI_MAX_PWR_INDEX) 3569 return WPI_MAX_PWR_INDEX; 3570 return idx; 3571 3572 #undef interpolate 3573 #undef fdivround 3574 } 3575 3576 /* 3577 * Set STA mode power saving level (between 0 and 5). 3578 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3579 */ 3580 static int 3581 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3582 { 3583 struct wpi_pmgt_cmd cmd; 3584 const struct wpi_pmgt *pmgt; 3585 uint32_t max, skip_dtim; 3586 uint32_t reg; 3587 int i; 3588 3589 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3590 "%s: dtim=%d, level=%d, async=%d\n", 3591 __func__, dtim, level, async); 3592 3593 /* Select which PS parameters to use. */ 3594 if (dtim <= 10) 3595 pmgt = &wpi_pmgt[0][level]; 3596 else 3597 pmgt = &wpi_pmgt[1][level]; 3598 3599 memset(&cmd, 0, sizeof cmd); 3600 if (level != 0) /* not CAM */ 3601 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3602 /* Retrieve PCIe Active State Power Management (ASPM). */ 3603 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3604 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3605 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3606 3607 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3608 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3609 3610 if (dtim == 0) { 3611 dtim = 1; 3612 skip_dtim = 0; 3613 } else 3614 skip_dtim = pmgt->skip_dtim; 3615 3616 if (skip_dtim != 0) { 3617 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3618 max = pmgt->intval[4]; 3619 if (max == (uint32_t)-1) 3620 max = dtim * (skip_dtim + 1); 3621 else if (max > dtim) 3622 max = (max / dtim) * dtim; 3623 } else 3624 max = dtim; 3625 3626 for (i = 0; i < 5; i++) 3627 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3628 3629 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3630 } 3631 3632 static int 3633 wpi_send_btcoex(struct wpi_softc *sc) 3634 { 3635 struct wpi_bluetooth cmd; 3636 3637 memset(&cmd, 0, sizeof cmd); 3638 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3639 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3640 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3641 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3642 __func__); 3643 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3644 } 3645 3646 static int 3647 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3648 { 3649 int error; 3650 3651 if (async) 3652 WPI_RXON_LOCK_ASSERT(sc); 3653 3654 if (assoc && (sc->rxon.filter & htole32(WPI_FILTER_BSS))) { 3655 struct wpi_assoc rxon_assoc; 3656 3657 rxon_assoc.flags = sc->rxon.flags; 3658 rxon_assoc.filter = sc->rxon.filter; 3659 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3660 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3661 rxon_assoc.reserved = 0; 3662 3663 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3664 sizeof (struct wpi_assoc), async); 3665 if (error != 0) { 3666 device_printf(sc->sc_dev, 3667 "RXON_ASSOC command failed, error %d\n", error); 3668 return error; 3669 } 3670 } else { 3671 if (async) { 3672 WPI_NT_LOCK(sc); 3673 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3674 sizeof (struct wpi_rxon), async); 3675 if (error == 0) 3676 wpi_clear_node_table(sc); 3677 WPI_NT_UNLOCK(sc); 3678 } else { 3679 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3680 sizeof (struct wpi_rxon), async); 3681 if (error == 0) 3682 wpi_clear_node_table(sc); 3683 } 3684 3685 if (error != 0) { 3686 device_printf(sc->sc_dev, 3687 "RXON command failed, error %d\n", error); 3688 return error; 3689 } 3690 3691 /* Add broadcast node. 
*/ 3692 error = wpi_add_broadcast_node(sc, async); 3693 if (error != 0) { 3694 device_printf(sc->sc_dev, 3695 "could not add broadcast node, error %d\n", error); 3696 return error; 3697 } 3698 } 3699 3700 /* Configuration has changed, set Tx power accordingly. */ 3701 if ((error = wpi_set_txpower(sc, async)) != 0) { 3702 device_printf(sc->sc_dev, 3703 "%s: could not set TX power, error %d\n", __func__, error); 3704 return error; 3705 } 3706 3707 return 0; 3708 } 3709 3710 /** 3711 * Configure the card to listen to a particular channel; this transitions the 3712 * card into being able to receive frames from remote devices. 3713 */ 3714 static int 3715 wpi_config(struct wpi_softc *sc) 3716 { 3717 struct ifnet *ifp = sc->sc_ifp; 3718 struct ieee80211com *ic = ifp->if_l2com; 3719 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3720 uint32_t flags; 3721 int error; 3722 3723 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3724 3725 /* Set power saving level to CAM during initialization. */ 3726 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) { 3727 device_printf(sc->sc_dev, 3728 "%s: could not set power saving level\n", __func__); 3729 return error; 3730 } 3731 3732 /* Configure bluetooth coexistence. */ 3733 if ((error = wpi_send_btcoex(sc)) != 0) { 3734 device_printf(sc->sc_dev, 3735 "could not configure bluetooth coexistence\n"); 3736 return error; 3737 } 3738 3739 /* Configure adapter. */ 3740 memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); 3741 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); 3742 3743 /* Set default channel. */ 3744 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 3745 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 3746 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 3747 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 3748 3749 sc->rxon.filter = WPI_FILTER_MULTICAST; 3750 switch (ic->ic_opmode) { 3751 case IEEE80211_M_STA: 3752 sc->rxon.mode = WPI_MODE_STA; 3753 break; 3754 case IEEE80211_M_IBSS: 3755 sc->rxon.mode = WPI_MODE_IBSS; 3756 sc->rxon.filter |= WPI_FILTER_BEACON; 3757 break; 3758 case IEEE80211_M_HOSTAP: 3759 /* XXX workaround for beaconing */ 3760 sc->rxon.mode = WPI_MODE_IBSS; 3761 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC; 3762 break; 3763 case IEEE80211_M_AHDEMO: 3764 /* XXX workaround for passive channel selection */ 3765 sc->rxon.mode = WPI_MODE_HOSTAP; 3766 break; 3767 case IEEE80211_M_MONITOR: 3768 sc->rxon.mode = WPI_MODE_MONITOR; 3769 break; 3770 default: 3771 device_printf(sc->sc_dev, "unknown opmode %d\n", 3772 ic->ic_opmode); 3773 return EINVAL; 3774 } 3775 sc->rxon.filter = htole32(sc->rxon.filter); 3776 wpi_set_promisc(sc); 3777 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 3778 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 3779 3780 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) { 3781 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3782 __func__); 3783 return error; 3784 } 3785 3786 /* Set up rate scaling. */ 3787 if ((error = wpi_mrr_setup(sc)) != 0) { 3788 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3789 error); 3790 return error; 3791 } 3792 3793 /* Disable beacon notifications (unused).
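 * WPI_BEACON_STATISTICS notifications are ignored by this driver anyway
 * (see wpi_rx_statistics()), so ask the firmware not to send them.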
*/ 3794 flags = WPI_STATISTICS_BEACON_DISABLE; 3795 error = wpi_cmd(sc, WPI_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3796 if (error != 0) { 3797 device_printf(sc->sc_dev, 3798 "could not disable beacon statistics, error %d\n", error); 3799 return error; 3800 } 3801 3802 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3803 3804 return 0; 3805 } 3806 3807 static uint16_t 3808 wpi_get_active_dwell_time(struct wpi_softc *sc, 3809 struct ieee80211_channel *c, uint8_t n_probes) 3810 { 3811 /* No channel? Default to 2GHz settings. */ 3812 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3813 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3814 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3815 } 3816 3817 /* 5GHz dwell time. */ 3818 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 3819 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 3820 } 3821 3822 /* 3823 * Limit the total dwell time to 85% of the beacon interval. 3824 * 3825 * Returns the dwell time in milliseconds. 3826 */ 3827 static uint16_t 3828 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 3829 { 3830 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3831 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3832 int bintval = 0; 3833 3834 /* bintval is in TU (1.024mS) */ 3835 if (vap != NULL) 3836 bintval = vap->iv_bss->ni_intval; 3837 3838 /* 3839 * If it's non-zero, we should calculate the minimum of 3840 * it and the DWELL_BASE. 3841 * 3842 * XXX Yes, the math should take into account that bintval 3843 * is 1.024mS, not 1mS.. 3844 */ 3845 if (bintval > 0) { 3846 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 3847 bintval); 3848 return (MIN(WPI_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); 3849 } 3850 3851 /* No association context? Default. */ 3852 return (WPI_PASSIVE_DWELL_BASE); 3853 } 3854 3855 static uint16_t 3856 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 3857 { 3858 uint16_t passive; 3859 3860 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 3861 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 3862 else 3863 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 3864 3865 /* Clamp to the beacon interval if we're associated. */ 3866 return (wpi_limit_dwell(sc, passive)); 3867 } 3868 3869 /* 3870 * Send a scan request to the firmware. 3871 */ 3872 static int 3873 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 3874 { 3875 struct ifnet *ifp = sc->sc_ifp; 3876 struct ieee80211com *ic = ifp->if_l2com; 3877 struct ieee80211_scan_state *ss = ic->ic_scan; 3878 struct ieee80211vap *vap = ss->ss_vap; 3879 struct wpi_scan_hdr *hdr; 3880 struct wpi_cmd_data *tx; 3881 struct wpi_scan_essid *essids; 3882 struct wpi_scan_chan *chan; 3883 struct ieee80211_frame *wh; 3884 struct ieee80211_rateset *rs; 3885 uint16_t dwell_active, dwell_passive; 3886 uint8_t *buf, *frm; 3887 int buflen, error, i, nssid; 3888 3889 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3890 3891 /* 3892 * We are absolutely not allowed to send a scan command when another 3893 * scan command is pending. 
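 * The pending scan_timeout callout is used as the "scan in progress"
 * marker: it is armed once the scan command has been sent and stopped
 * again from the WPI_STOP_SCAN notification.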
3894 */ 3895 if (callout_pending(&sc->scan_timeout)) { 3896 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 3897 __func__); 3898 3899 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3900 3901 return (EAGAIN); 3902 } 3903 3904 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 3905 if (buf == NULL) { 3906 device_printf(sc->sc_dev, 3907 "%s: could not allocate buffer for scan command\n", 3908 __func__); 3909 error = ENOMEM; 3910 goto fail; 3911 } 3912 hdr = (struct wpi_scan_hdr *)buf; 3913 3914 /* 3915 * Move to the next channel if no packets are received within 10 msecs 3916 * after sending the probe request. 3917 */ 3918 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 3919 hdr->quiet_threshold = htole16(1); /* min # of packets */ 3920 /* 3921 * Max needs to be greater than active and passive and quiet! 3922 * It's also in microseconds! 3923 */ 3924 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 3925 hdr->pause_svc = htole32((4 << 24) | 3926 (100 * IEEE80211_DUR_TU)); /* Hardcode for now */ 3927 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 3928 3929 tx = (struct wpi_cmd_data *)(hdr + 1); 3930 tx->flags = htole32(WPI_TX_AUTO_SEQ); 3931 tx->id = WPI_ID_BROADCAST; 3932 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3933 3934 if (IEEE80211_IS_CHAN_5GHZ(c)) { 3935 /* Send probe requests at 6Mbps. */ 3936 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 3937 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 3938 } else { 3939 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 3940 /* Send probe requests at 1Mbps. */ 3941 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3942 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 3943 } 3944 3945 essids = (struct wpi_scan_essid *)(tx + 1); 3946 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 3947 for (i = 0; i < nssid; i++) { 3948 essids[i].id = IEEE80211_ELEMID_SSID; 3949 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 3950 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 3951 #ifdef WPI_DEBUG 3952 if (sc->sc_debug & WPI_DEBUG_SCAN) { 3953 printf("Scanning Essid: "); 3954 ieee80211_print_essid(essids[i].data, essids[i].len); 3955 printf("\n"); 3956 } 3957 #endif 3958 } 3959 3960 /* 3961 * Build a probe request frame. Most of the following code is a 3962 * copy & paste of what is done in net80211. 3963 */ 3964 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 3965 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 3966 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 3967 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 3968 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 3969 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 3970 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 3971 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ 3972 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ 3973 3974 frm = (uint8_t *)(wh + 1); 3975 frm = ieee80211_add_ssid(frm, NULL, 0); 3976 frm = ieee80211_add_rates(frm, rs); 3977 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 3978 frm = ieee80211_add_xrates(frm, rs); 3979 3980 /* Set length of probe request. */ 3981 tx->len = htole16(frm - (uint8_t *)wh); 3982 3983 /* 3984 * Construct information about the channel that we 3985 * want to scan. 
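 * Only one channel descriptor is appended per SCAN command, so scanning
 * several channels means issuing one command per channel.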
The firmware expects this to be directly 3986 * after the scan probe request 3987 */ 3988 chan = (struct wpi_scan_chan *)frm; 3989 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 3990 chan->flags = 0; 3991 if (nssid) { 3992 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 3993 chan->flags |= WPI_CHAN_NPBREQS(nssid); 3994 } else 3995 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 3996 3997 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 3998 chan->flags |= WPI_CHAN_ACTIVE; 3999 4000 /* 4001 * Calculate the active/passive dwell times. 4002 */ 4003 4004 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4005 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4006 4007 /* Make sure they're valid. */ 4008 if (dwell_passive <= dwell_active) 4009 dwell_passive = dwell_active + 1; 4010 4011 chan->active = htole16(dwell_active); 4012 chan->passive = htole16(dwell_passive); 4013 4014 chan->dsp_gain = 0x6e; /* Default level */ 4015 4016 if (IEEE80211_IS_CHAN_5GHZ(c)) 4017 chan->rf_gain = 0x3b; 4018 else 4019 chan->rf_gain = 0x28; 4020 4021 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4022 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4023 4024 hdr->nchan++; 4025 chan++; 4026 4027 buflen = (uint8_t *)chan - buf; 4028 hdr->len = htole16(buflen); 4029 4030 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4031 hdr->nchan); 4032 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4033 free(buf, M_DEVBUF); 4034 4035 if (error != 0) 4036 goto fail; 4037 4038 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4039 4040 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4041 4042 return 0; 4043 4044 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4045 4046 return error; 4047 } 4048 4049 static int 4050 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4051 { 4052 struct ieee80211com *ic = vap->iv_ic; 4053 struct ieee80211_node *ni = vap->iv_bss; 4054 int error; 4055 4056 WPI_RXON_LOCK(sc); 4057 4058 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4059 4060 /* Update adapter configuration. */ 4061 sc->rxon.associd = 0; 4062 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4063 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4064 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4065 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4066 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4067 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4068 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4069 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4070 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4071 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4072 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4073 sc->rxon.cck_mask = 0; 4074 sc->rxon.ofdm_mask = 0x15; 4075 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4076 sc->rxon.cck_mask = 0x03; 4077 sc->rxon.ofdm_mask = 0; 4078 } else { 4079 /* Assume 802.11b/g. 
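 * 0x0f enables all four CCK rates; 0x15 should correspond to the basic
 * OFDM rates (6, 12 and 24 Mbps) until the real rate set is negotiated.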
*/ 4080 sc->rxon.cck_mask = 0x0f; 4081 sc->rxon.ofdm_mask = 0x15; 4082 } 4083 4084 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4085 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4086 sc->rxon.ofdm_mask); 4087 4088 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4089 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4090 __func__); 4091 } 4092 4093 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4094 4095 WPI_RXON_UNLOCK(sc); 4096 4097 return error; 4098 } 4099 4100 static int 4101 wpi_config_beacon(struct wpi_vap *wvp) 4102 { 4103 struct ieee80211com *ic = wvp->wv_vap.iv_ic; 4104 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4105 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4106 struct wpi_softc *sc = ic->ic_ifp->if_softc; 4107 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4108 struct ieee80211_tim_ie *tie; 4109 struct mbuf *m; 4110 uint8_t *ptr; 4111 int error; 4112 4113 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4114 4115 WPI_VAP_LOCK_ASSERT(wvp); 4116 4117 cmd->len = htole16(bcn->m->m_pkthdr.len); 4118 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4119 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4120 4121 /* XXX seems to be unused */ 4122 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4123 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4124 ptr = mtod(bcn->m, uint8_t *); 4125 4126 cmd->tim = htole16(bo->bo_tim - ptr); 4127 cmd->timsz = tie->tim_len; 4128 } 4129 4130 /* Necessary for recursion in ieee80211_beacon_update(). */ 4131 m = bcn->m; 4132 bcn->m = m_dup(m, M_NOWAIT); 4133 if (bcn->m == NULL) { 4134 device_printf(sc->sc_dev, 4135 "%s: could not copy beacon frame\n", __func__); 4136 error = ENOMEM; 4137 goto end; 4138 } 4139 4140 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4141 device_printf(sc->sc_dev, 4142 "%s: could not update beacon frame, error %d", __func__, 4143 error); 4144 } 4145 4146 /* Restore mbuf. 
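 * The copy (if one was made) has been handed off to wpi_cmd2(); point
 * bcn->m back at the original beacon so later updates keep operating on
 * a valid mbuf.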
*/ 4147 end: bcn->m = m; 4148 4149 return error; 4150 } 4151 4152 static int 4153 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4154 { 4155 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 4156 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4157 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4158 struct mbuf *m; 4159 int error; 4160 4161 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4162 4163 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4164 return EINVAL; 4165 4166 m = ieee80211_beacon_alloc(ni, bo); 4167 if (m == NULL) { 4168 device_printf(sc->sc_dev, 4169 "%s: could not allocate beacon frame\n", __func__); 4170 return ENOMEM; 4171 } 4172 4173 WPI_VAP_LOCK(wvp); 4174 if (bcn->m != NULL) 4175 m_freem(bcn->m); 4176 4177 bcn->m = m; 4178 4179 error = wpi_config_beacon(wvp); 4180 WPI_VAP_UNLOCK(wvp); 4181 4182 return error; 4183 } 4184 4185 static void 4186 wpi_update_beacon(struct ieee80211vap *vap, int item) 4187 { 4188 struct wpi_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4189 struct wpi_vap *wvp = WPI_VAP(vap); 4190 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4191 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4192 struct ieee80211_node *ni = vap->iv_bss; 4193 int mcast = 0; 4194 4195 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4196 4197 WPI_VAP_LOCK(wvp); 4198 if (bcn->m == NULL) { 4199 bcn->m = ieee80211_beacon_alloc(ni, bo); 4200 if (bcn->m == NULL) { 4201 device_printf(sc->sc_dev, 4202 "%s: could not allocate beacon frame\n", __func__); 4203 4204 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4205 __func__); 4206 4207 WPI_VAP_UNLOCK(wvp); 4208 return; 4209 } 4210 } 4211 WPI_VAP_UNLOCK(wvp); 4212 4213 if (item == IEEE80211_BEACON_TIM) 4214 mcast = 1; /* TODO */ 4215 4216 setbit(bo->bo_flags, item); 4217 ieee80211_beacon_update(ni, bo, bcn->m, mcast); 4218 4219 WPI_VAP_LOCK(wvp); 4220 wpi_config_beacon(wvp); 4221 WPI_VAP_UNLOCK(wvp); 4222 4223 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4224 } 4225 4226 static void 4227 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4228 { 4229 struct ieee80211vap *vap = ni->ni_vap; 4230 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4231 struct wpi_node *wn = WPI_NODE(ni); 4232 int error; 4233 4234 WPI_NT_LOCK(sc); 4235 4236 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4237 4238 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4239 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4240 device_printf(sc->sc_dev, 4241 "%s: could not add IBSS node, error %d\n", 4242 __func__, error); 4243 } 4244 } 4245 WPI_NT_UNLOCK(sc); 4246 } 4247 4248 static int 4249 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4250 { 4251 struct ieee80211com *ic = vap->iv_ic; 4252 struct ieee80211_node *ni = vap->iv_bss; 4253 int error; 4254 4255 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4256 4257 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4258 /* Link LED blinks while monitoring. */ 4259 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4260 return 0; 4261 } 4262 4263 /* XXX kernel panic workaround */ 4264 if (ni->ni_chan == IEEE80211_CHAN_ANYC) { 4265 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4266 __func__); 4267 return EINVAL; 4268 } 4269 4270 if ((error = wpi_set_timing(sc, ni)) != 0) { 4271 device_printf(sc->sc_dev, 4272 "%s: could not set timing, error %d\n", __func__, error); 4273 return error; 4274 } 4275 4276 /* Update adapter configuration. 
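 * Rebuild the RXON with the parameters negotiated during association;
 * WPI_FILTER_BSS is added below so that the firmware can start filtering
 * on our BSSID.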
*/ 4277 WPI_RXON_LOCK(sc); 4278 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4279 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4280 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 4281 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4282 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4283 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4284 /* Short preamble and slot time are negotiated when associating. */ 4285 sc->rxon.flags &= ~htole32(WPI_RXON_SHPREAMBLE | WPI_RXON_SHSLOT); 4286 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4287 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4288 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4289 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4290 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4291 sc->rxon.cck_mask = 0; 4292 sc->rxon.ofdm_mask = 0x15; 4293 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4294 sc->rxon.cck_mask = 0x03; 4295 sc->rxon.ofdm_mask = 0; 4296 } else { 4297 /* Assume 802.11b/g. */ 4298 sc->rxon.cck_mask = 0x0f; 4299 sc->rxon.ofdm_mask = 0x15; 4300 } 4301 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4302 4303 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4304 sc->rxon.chan, sc->rxon.flags); 4305 4306 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4307 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4308 __func__); 4309 return error; 4310 } 4311 4312 /* Start periodic calibration timer. */ 4313 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4314 4315 WPI_RXON_UNLOCK(sc); 4316 4317 if (vap->iv_opmode == IEEE80211_M_IBSS || 4318 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4319 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4320 device_printf(sc->sc_dev, 4321 "%s: could not setup beacon, error %d\n", __func__, 4322 error); 4323 return error; 4324 } 4325 } 4326 4327 if (vap->iv_opmode == IEEE80211_M_STA) { 4328 /* Add BSS node. */ 4329 WPI_NT_LOCK(sc); 4330 error = wpi_add_sta_node(sc, ni); 4331 WPI_NT_UNLOCK(sc); 4332 if (error != 0) { 4333 device_printf(sc->sc_dev, 4334 "%s: could not add BSS node, error %d\n", __func__, 4335 error); 4336 return error; 4337 } 4338 } 4339 4340 /* Link LED always on while associated. */ 4341 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4342 4343 /* Enable power-saving mode if requested by user. 
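 * (Level 3, sent asynchronously. IBSS vaps are skipped here, presumably
 * because they are expected to stay awake and keep beaconing.)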
*/ 4344 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4345 vap->iv_opmode != IEEE80211_M_IBSS) 4346 (void)wpi_set_pslevel(sc, 0, 3, 1); 4347 4348 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4349 4350 return 0; 4351 } 4352 4353 static int 4354 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4355 { 4356 const struct ieee80211_cipher *cip = k->wk_cipher; 4357 struct ieee80211vap *vap = ni->ni_vap; 4358 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4359 struct wpi_node *wn = WPI_NODE(ni); 4360 struct wpi_node_info node; 4361 uint16_t kflags; 4362 int error; 4363 4364 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4365 4366 if (wpi_check_node_entry(sc, wn->id) == 0) { 4367 device_printf(sc->sc_dev, "%s: node does not exist\n", 4368 __func__); 4369 return 0; 4370 } 4371 4372 switch (cip->ic_cipher) { 4373 case IEEE80211_CIPHER_AES_CCM: 4374 kflags = WPI_KFLAG_CCMP; 4375 break; 4376 4377 default: 4378 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4379 cip->ic_cipher); 4380 return 0; 4381 } 4382 4383 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4384 if (k->wk_flags & IEEE80211_KEY_GROUP) 4385 kflags |= WPI_KFLAG_MULTICAST; 4386 4387 memset(&node, 0, sizeof node); 4388 node.id = wn->id; 4389 node.control = WPI_NODE_UPDATE; 4390 node.flags = WPI_FLAG_KEY_SET; 4391 node.kflags = htole16(kflags); 4392 memcpy(node.key, k->wk_key, k->wk_keylen); 4393 again: 4394 DPRINTF(sc, WPI_DEBUG_KEY, 4395 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4396 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4397 node.id, ether_sprintf(ni->ni_macaddr)); 4398 4399 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4400 if (error != 0) { 4401 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4402 error); 4403 return !error; 4404 } 4405 4406 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4407 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4408 kflags |= WPI_KFLAG_MULTICAST; 4409 node.kflags = htole16(kflags); 4410 4411 goto again; 4412 } 4413 4414 return 1; 4415 } 4416 4417 static void 4418 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4419 { 4420 const struct ieee80211_key *k = arg; 4421 struct ieee80211vap *vap = ni->ni_vap; 4422 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4423 struct wpi_node *wn = WPI_NODE(ni); 4424 int error; 4425 4426 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4427 return; 4428 4429 WPI_NT_LOCK(sc); 4430 error = wpi_load_key(ni, k); 4431 WPI_NT_UNLOCK(sc); 4432 4433 if (error == 0) { 4434 device_printf(sc->sc_dev, "%s: error while setting key\n", 4435 __func__); 4436 } 4437 } 4438 4439 static int 4440 wpi_set_global_keys(struct ieee80211_node *ni) 4441 { 4442 struct ieee80211vap *vap = ni->ni_vap; 4443 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4444 int error = 1; 4445 4446 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4447 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4448 error = wpi_load_key(ni, wk); 4449 4450 return !error; 4451 } 4452 4453 static int 4454 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4455 { 4456 struct ieee80211vap *vap = ni->ni_vap; 4457 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4458 struct wpi_node *wn = WPI_NODE(ni); 4459 struct wpi_node_info node; 4460 uint16_t kflags; 4461 int error; 4462 4463 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4464 4465 if (wpi_check_node_entry(sc, wn->id) == 0) { 4466 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 
4467 return 1; /* Nothing to do. */ 4468 } 4469 4470 kflags = WPI_KFLAG_KID(k->wk_keyix); 4471 if (k->wk_flags & IEEE80211_KEY_GROUP) 4472 kflags |= WPI_KFLAG_MULTICAST; 4473 4474 memset(&node, 0, sizeof node); 4475 node.id = wn->id; 4476 node.control = WPI_NODE_UPDATE; 4477 node.flags = WPI_FLAG_KEY_SET; 4478 node.kflags = htole16(kflags); 4479 again: 4480 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4481 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4482 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4483 4484 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4485 if (error != 0) { 4486 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4487 error); 4488 return !error; 4489 } 4490 4491 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4492 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4493 kflags |= WPI_KFLAG_MULTICAST; 4494 node.kflags = htole16(kflags); 4495 4496 goto again; 4497 } 4498 4499 return 1; 4500 } 4501 4502 static void 4503 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4504 { 4505 const struct ieee80211_key *k = arg; 4506 struct ieee80211vap *vap = ni->ni_vap; 4507 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4508 struct wpi_node *wn = WPI_NODE(ni); 4509 int error; 4510 4511 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4512 return; 4513 4514 WPI_NT_LOCK(sc); 4515 error = wpi_del_key(ni, k); 4516 WPI_NT_UNLOCK(sc); 4517 4518 if (error == 0) { 4519 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4520 __func__); 4521 } 4522 } 4523 4524 static int 4525 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4526 int set) 4527 { 4528 struct ieee80211com *ic = vap->iv_ic; 4529 struct wpi_softc *sc = ic->ic_ifp->if_softc; 4530 struct wpi_vap *wvp = WPI_VAP(vap); 4531 struct ieee80211_node *ni; 4532 int error, ni_ref = 0; 4533 4534 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4535 4536 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4537 /* Not for us. */ 4538 return 1; 4539 } 4540 4541 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4542 /* XMIT keys are handled in wpi_tx_data(). */ 4543 return 1; 4544 } 4545 4546 /* Handle group keys. */ 4547 if (&vap->iv_nw_keys[0] <= k && 4548 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4549 WPI_NT_LOCK(sc); 4550 if (set) 4551 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4552 else 4553 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4554 WPI_NT_UNLOCK(sc); 4555 4556 if (vap->iv_state == IEEE80211_S_RUN) { 4557 ieee80211_iterate_nodes(&ic->ic_sta, 4558 set ? 
wpi_load_key_cb : wpi_del_key_cb, (void *)k); 4559 } 4560 4561 return 1; 4562 } 4563 4564 switch (vap->iv_opmode) { 4565 case IEEE80211_M_STA: 4566 ni = vap->iv_bss; 4567 break; 4568 4569 case IEEE80211_M_IBSS: 4570 case IEEE80211_M_AHDEMO: 4571 case IEEE80211_M_HOSTAP: 4572 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4573 if (ni == NULL) 4574 return 0; /* should not happen */ 4575 4576 ni_ref = 1; 4577 break; 4578 4579 default: 4580 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4581 vap->iv_opmode); 4582 return 0; 4583 } 4584 4585 WPI_NT_LOCK(sc); 4586 if (set) 4587 error = wpi_load_key(ni, k); 4588 else 4589 error = wpi_del_key(ni, k); 4590 WPI_NT_UNLOCK(sc); 4591 4592 if (ni_ref) 4593 ieee80211_node_decref(ni); 4594 4595 return error; 4596 } 4597 4598 static int 4599 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, 4600 const uint8_t mac[IEEE80211_ADDR_LEN]) 4601 { 4602 return wpi_process_key(vap, k, 1); 4603 } 4604 4605 static int 4606 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4607 { 4608 return wpi_process_key(vap, k, 0); 4609 } 4610 4611 /* 4612 * This function is called after the runtime firmware notifies us of its 4613 * readiness (called in a process context). 4614 */ 4615 static int 4616 wpi_post_alive(struct wpi_softc *sc) 4617 { 4618 int ntries, error; 4619 4620 /* Check (again) that the radio is not disabled. */ 4621 if ((error = wpi_nic_lock(sc)) != 0) 4622 return error; 4623 4624 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4625 4626 /* NB: Runtime firmware must be up and running. */ 4627 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4628 device_printf(sc->sc_dev, 4629 "RF switch: radio disabled (%s)\n", __func__); 4630 wpi_nic_unlock(sc); 4631 return EPERM; /* :-) */ 4632 } 4633 wpi_nic_unlock(sc); 4634 4635 /* Wait for thermal sensor to calibrate. */ 4636 for (ntries = 0; ntries < 1000; ntries++) { 4637 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4638 break; 4639 DELAY(10); 4640 } 4641 4642 if (ntries == 1000) { 4643 device_printf(sc->sc_dev, 4644 "timeout waiting for thermal sensor calibration\n"); 4645 return ETIMEDOUT; 4646 } 4647 4648 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4649 return 0; 4650 } 4651 4652 /* 4653 * The firmware boot code is small and is intended to be copied directly into 4654 * the NIC internal memory (no DMA transfer). 4655 */ 4656 static int 4657 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size) 4658 { 4659 int error, ntries; 4660 4661 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4662 4663 size /= sizeof (uint32_t); 4664 4665 if ((error = wpi_nic_lock(sc)) != 0) 4666 return error; 4667 4668 /* Copy microcode image into NIC memory. */ 4669 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4670 (const uint32_t *)ucode, size); 4671 4672 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4673 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4674 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4675 4676 /* Start boot load now. */ 4677 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4678 4679 /* Wait for transfer to complete. 
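 * Completion shows up as the idle bit of FH TX channel 6; poll it for
 * up to 10ms (1000 iterations of 10us) before giving up.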
*/ 4680 for (ntries = 0; ntries < 1000; ntries++) { 4681 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4682 DPRINTF(sc, WPI_DEBUG_HW, 4683 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4684 WPI_FH_TX_STATUS_IDLE(6), 4685 status & WPI_FH_TX_STATUS_IDLE(6)); 4686 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4687 DPRINTF(sc, WPI_DEBUG_HW, 4688 "Status Match! - ntries = %d\n", ntries); 4689 break; 4690 } 4691 DELAY(10); 4692 } 4693 if (ntries == 1000) { 4694 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4695 __func__); 4696 wpi_nic_unlock(sc); 4697 return ETIMEDOUT; 4698 } 4699 4700 /* Enable boot after power up. */ 4701 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4702 4703 wpi_nic_unlock(sc); 4704 return 0; 4705 } 4706 4707 static int 4708 wpi_load_firmware(struct wpi_softc *sc) 4709 { 4710 struct wpi_fw_info *fw = &sc->fw; 4711 struct wpi_dma_info *dma = &sc->fw_dma; 4712 int error; 4713 4714 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4715 4716 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4717 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4718 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4719 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4720 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4721 4722 /* Tell adapter where to find initialization sections. */ 4723 if ((error = wpi_nic_lock(sc)) != 0) 4724 return error; 4725 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4726 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4727 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4728 dma->paddr + WPI_FW_DATA_MAXSZ); 4729 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4730 wpi_nic_unlock(sc); 4731 4732 /* Load firmware boot code. */ 4733 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4734 if (error != 0) { 4735 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4736 __func__); 4737 return error; 4738 } 4739 4740 /* Now press "execute". */ 4741 WPI_WRITE(sc, WPI_RESET, 0); 4742 4743 /* Wait at most one second for first alive notification. */ 4744 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4745 device_printf(sc->sc_dev, 4746 "%s: timeout waiting for adapter to initialize, error %d\n", 4747 __func__, error); 4748 return error; 4749 } 4750 4751 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4752 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4753 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4754 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4755 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4756 4757 /* Tell adapter where to find runtime sections. 
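 * Unlike the init image, the runtime image is not copied by the host
 * here; setting the WPI_FW_UPDATED bit in the text size register appears
 * to be what tells the BSM to fetch the new image from host memory on
 * its own.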
*/ 4758 if ((error = wpi_nic_lock(sc)) != 0) 4759 return error; 4760 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4761 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4762 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4763 dma->paddr + WPI_FW_DATA_MAXSZ); 4764 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4765 WPI_FW_UPDATED | fw->main.textsz); 4766 wpi_nic_unlock(sc); 4767 4768 return 0; 4769 } 4770 4771 static int 4772 wpi_read_firmware(struct wpi_softc *sc) 4773 { 4774 const struct firmware *fp; 4775 struct wpi_fw_info *fw = &sc->fw; 4776 const struct wpi_firmware_hdr *hdr; 4777 int error; 4778 4779 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4780 4781 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4782 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 4783 4784 WPI_UNLOCK(sc); 4785 fp = firmware_get(WPI_FW_NAME); 4786 WPI_LOCK(sc); 4787 4788 if (fp == NULL) { 4789 device_printf(sc->sc_dev, 4790 "could not load firmware image '%s'\n", WPI_FW_NAME); 4791 return EINVAL; 4792 } 4793 4794 sc->fw_fp = fp; 4795 4796 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 4797 device_printf(sc->sc_dev, 4798 "firmware file too short: %zu bytes\n", fp->datasize); 4799 error = EINVAL; 4800 goto fail; 4801 } 4802 4803 fw->size = fp->datasize; 4804 fw->data = (const uint8_t *)fp->data; 4805 4806 /* Extract firmware header information. */ 4807 hdr = (const struct wpi_firmware_hdr *)fw->data; 4808 4809 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 4810 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 4811 4812 fw->main.textsz = le32toh(hdr->rtextsz); 4813 fw->main.datasz = le32toh(hdr->rdatasz); 4814 fw->init.textsz = le32toh(hdr->itextsz); 4815 fw->init.datasz = le32toh(hdr->idatasz); 4816 fw->boot.textsz = le32toh(hdr->btextsz); 4817 fw->boot.datasz = 0; 4818 4819 /* Sanity-check firmware header. */ 4820 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 4821 fw->main.datasz > WPI_FW_DATA_MAXSZ || 4822 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 4823 fw->init.datasz > WPI_FW_DATA_MAXSZ || 4824 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 4825 (fw->boot.textsz & 3) != 0) { 4826 device_printf(sc->sc_dev, "invalid firmware header\n"); 4827 error = EINVAL; 4828 goto fail; 4829 } 4830 4831 /* Check that all firmware sections fit. */ 4832 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 4833 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 4834 device_printf(sc->sc_dev, 4835 "firmware file too short: %zu bytes\n", fw->size); 4836 error = EINVAL; 4837 goto fail; 4838 } 4839 4840 /* Get pointers to firmware sections. 
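 * The sections follow the header back to back, in the order shown in the
 * layout diagram above.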
*/ 4841 fw->main.text = (const uint8_t *)(hdr + 1); 4842 fw->main.data = fw->main.text + fw->main.textsz; 4843 fw->init.text = fw->main.data + fw->main.datasz; 4844 fw->init.data = fw->init.text + fw->init.textsz; 4845 fw->boot.text = fw->init.data + fw->init.datasz; 4846 4847 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 4848 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 4849 "runtime (text: %u, data: %u) init (text: %u, data %u) " 4850 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 4851 fw->main.textsz, fw->main.datasz, 4852 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 4853 4854 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 4855 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 4856 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 4857 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 4858 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 4859 4860 return 0; 4861 4862 fail: wpi_unload_firmware(sc); 4863 return error; 4864 } 4865 4866 /** 4867 * Free the referenced firmware image 4868 */ 4869 static void 4870 wpi_unload_firmware(struct wpi_softc *sc) 4871 { 4872 if (sc->fw_fp != NULL) { 4873 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 4874 sc->fw_fp = NULL; 4875 } 4876 } 4877 4878 static int 4879 wpi_clock_wait(struct wpi_softc *sc) 4880 { 4881 int ntries; 4882 4883 /* Set "initialization complete" bit. */ 4884 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 4885 4886 /* Wait for clock stabilization. */ 4887 for (ntries = 0; ntries < 2500; ntries++) { 4888 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 4889 return 0; 4890 DELAY(100); 4891 } 4892 device_printf(sc->sc_dev, 4893 "%s: timeout waiting for clock stabilization\n", __func__); 4894 4895 return ETIMEDOUT; 4896 } 4897 4898 static int 4899 wpi_apm_init(struct wpi_softc *sc) 4900 { 4901 uint32_t reg; 4902 int error; 4903 4904 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4905 4906 /* Disable L0s exit timer (NMI bug workaround). */ 4907 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 4908 /* Don't wait for ICH L0s (ICH bug workaround). */ 4909 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 4910 4911 /* Set FH wait threshold to max (HW bug under stress workaround). */ 4912 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 4913 4914 /* Cleanup. */ 4915 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 4916 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000E00); 4917 4918 /* Retrieve PCIe Active State Power Management (ASPM). */ 4919 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 4920 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 4921 if (reg & 0x02) /* L1 Entry enabled. */ 4922 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 4923 else 4924 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 4925 4926 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 4927 4928 /* Wait for clock stabilization before accessing prph. */ 4929 if ((error = wpi_clock_wait(sc)) != 0) 4930 return error; 4931 4932 if ((error = wpi_nic_lock(sc)) != 0) 4933 return error; 4934 /* Enable DMA and BSM (Bootstrap State Machine). */ 4935 wpi_prph_write(sc, WPI_APMG_CLK_EN, 4936 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 4937 DELAY(20); 4938 /* Disable L1-Active. 
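 * (Like the L0s handling above, this looks like another PCIe power-state
 * workaround; the bit lives in the APMG unit, hence the NIC lock held
 * around it.)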
*/ 4939 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 4940 wpi_nic_unlock(sc); 4941 4942 return 0; 4943 } 4944 4945 static void 4946 wpi_apm_stop_master(struct wpi_softc *sc) 4947 { 4948 int ntries; 4949 4950 /* Stop busmaster DMA activity. */ 4951 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 4952 4953 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 4954 WPI_GP_CNTRL_MAC_PS) 4955 return; /* Already asleep. */ 4956 4957 for (ntries = 0; ntries < 100; ntries++) { 4958 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 4959 return; 4960 DELAY(10); 4961 } 4962 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 4963 __func__); 4964 } 4965 4966 static void 4967 wpi_apm_stop(struct wpi_softc *sc) 4968 { 4969 wpi_apm_stop_master(sc); 4970 4971 /* Reset the entire device. */ 4972 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 4973 DELAY(10); 4974 /* Clear "initialization complete" bit. */ 4975 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 4976 } 4977 4978 static void 4979 wpi_nic_config(struct wpi_softc *sc) 4980 { 4981 uint32_t rev; 4982 4983 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4984 4985 /* voodoo from the Linux "driver".. */ 4986 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 4987 if ((rev & 0xc0) == 0x40) 4988 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 4989 else if (!(rev & 0x80)) 4990 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 4991 4992 if (sc->cap == 0x80) 4993 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 4994 4995 if ((sc->rev & 0xf0) == 0xd0) 4996 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 4997 else 4998 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 4999 5000 if (sc->type > 1) 5001 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5002 } 5003 5004 static int 5005 wpi_hw_init(struct wpi_softc *sc) 5006 { 5007 int chnl, ntries, error; 5008 5009 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5010 5011 /* Clear pending interrupts. */ 5012 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5013 5014 if ((error = wpi_apm_init(sc)) != 0) { 5015 device_printf(sc->sc_dev, 5016 "%s: could not power ON adapter, error %d\n", __func__, 5017 error); 5018 return error; 5019 } 5020 5021 /* Select VMAIN power source. */ 5022 if ((error = wpi_nic_lock(sc)) != 0) 5023 return error; 5024 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5025 wpi_nic_unlock(sc); 5026 /* Spin until VMAIN gets selected. */ 5027 for (ntries = 0; ntries < 5000; ntries++) { 5028 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5029 break; 5030 DELAY(10); 5031 } 5032 if (ntries == 5000) { 5033 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5034 return ETIMEDOUT; 5035 } 5036 5037 /* Perform adapter initialization. */ 5038 wpi_nic_config(sc); 5039 5040 /* Initialize RX ring. */ 5041 if ((error = wpi_nic_lock(sc)) != 0) 5042 return error; 5043 /* Set physical address of RX ring. */ 5044 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5045 /* Set physical address of RX read pointer. */ 5046 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5047 offsetof(struct wpi_shared, next)); 5048 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5049 /* Enable RX. 
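 * The RX DMA engine is started with a ring of 2^WPI_RX_RING_COUNT_LOG
 * receive buffer descriptors, status write-back and interrupts directed
 * at the host; see the WPI_FH_RX_CONFIG_ flags below.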
*/ 5050 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5051 WPI_FH_RX_CONFIG_DMA_ENA | 5052 WPI_FH_RX_CONFIG_RDRBD_ENA | 5053 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5054 WPI_FH_RX_CONFIG_MAXFRAG | 5055 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5056 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5057 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5058 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5059 wpi_nic_unlock(sc); 5060 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5061 5062 /* Initialize TX rings. */ 5063 if ((error = wpi_nic_lock(sc)) != 0) 5064 return error; 5065 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5066 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5067 /* Enable all 6 TX rings. */ 5068 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5069 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5070 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5071 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5072 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5073 /* Set physical address of TX rings. */ 5074 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5075 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5076 5077 /* Enable all DMA channels. */ 5078 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5079 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5080 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5081 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5082 } 5083 wpi_nic_unlock(sc); 5084 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5085 5086 /* Clear "radio off" and "commands blocked" bits. */ 5087 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5088 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5089 5090 /* Clear pending interrupts. */ 5091 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5092 /* Enable interrupts. */ 5093 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5094 5095 /* _Really_ make sure "radio off" bit is cleared! */ 5096 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5097 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5098 5099 if ((error = wpi_load_firmware(sc)) != 0) { 5100 device_printf(sc->sc_dev, 5101 "%s: could not load firmware, error %d\n", __func__, 5102 error); 5103 return error; 5104 } 5105 /* Wait at most one second for firmware alive notification. */ 5106 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5107 device_printf(sc->sc_dev, 5108 "%s: timeout waiting for adapter to initialize, error %d\n", 5109 __func__, error); 5110 return error; 5111 } 5112 5113 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5114 5115 /* Do post-firmware initialization. */ 5116 return wpi_post_alive(sc); 5117 } 5118 5119 static void 5120 wpi_hw_stop(struct wpi_softc *sc) 5121 { 5122 int chnl, qid, ntries; 5123 5124 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5125 5126 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5127 wpi_nic_lock(sc); 5128 5129 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5130 5131 /* Disable interrupts. */ 5132 WPI_WRITE(sc, WPI_INT_MASK, 0); 5133 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5134 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5135 5136 /* Make sure we no longer hold the NIC lock. */ 5137 wpi_nic_unlock(sc); 5138 5139 if (wpi_nic_lock(sc) == 0) { 5140 /* Stop TX scheduler. */ 5141 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5142 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5143 5144 /* Stop all DMA channels. 
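 * Each channel's config register is cleared and then its idle bit is
 * polled for up to 2ms (200 iterations of 10us) before moving on to the
 * next channel.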
*/ 5145 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5146 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5147 for (ntries = 0; ntries < 200; ntries++) { 5148 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5149 WPI_FH_TX_STATUS_IDLE(chnl)) 5150 break; 5151 DELAY(10); 5152 } 5153 } 5154 wpi_nic_unlock(sc); 5155 } 5156 5157 /* Stop RX ring. */ 5158 wpi_reset_rx_ring(sc); 5159 5160 /* Reset all TX rings. */ 5161 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 5162 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5163 5164 if (wpi_nic_lock(sc) == 0) { 5165 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5166 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5167 wpi_nic_unlock(sc); 5168 } 5169 DELAY(5); 5170 /* Power OFF adapter. */ 5171 wpi_apm_stop(sc); 5172 } 5173 5174 static void 5175 wpi_radio_on(void *arg0, int pending) 5176 { 5177 struct wpi_softc *sc = arg0; 5178 struct ifnet *ifp = sc->sc_ifp; 5179 struct ieee80211com *ic = ifp->if_l2com; 5180 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5181 5182 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5183 5184 if (vap != NULL) { 5185 wpi_init(sc); 5186 ieee80211_init(vap); 5187 } 5188 5189 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL) { 5190 WPI_LOCK(sc); 5191 callout_stop(&sc->watchdog_rfkill); 5192 WPI_UNLOCK(sc); 5193 } 5194 } 5195 5196 static void 5197 wpi_radio_off(void *arg0, int pending) 5198 { 5199 struct wpi_softc *sc = arg0; 5200 struct ifnet *ifp = sc->sc_ifp; 5201 struct ieee80211com *ic = ifp->if_l2com; 5202 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5203 5204 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5205 5206 wpi_stop(sc); 5207 if (vap != NULL) 5208 ieee80211_stop(vap); 5209 5210 WPI_LOCK(sc); 5211 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5212 WPI_UNLOCK(sc); 5213 } 5214 5215 static void 5216 wpi_init(void *arg) 5217 { 5218 struct wpi_softc *sc = arg; 5219 struct ifnet *ifp = sc->sc_ifp; 5220 struct ieee80211com *ic = ifp->if_l2com; 5221 int error; 5222 5223 WPI_LOCK(sc); 5224 5225 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5226 5227 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 5228 goto end; 5229 5230 /* Check that the radio is not disabled by hardware switch. */ 5231 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5232 device_printf(sc->sc_dev, 5233 "RF switch: radio disabled (%s)\n", __func__); 5234 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5235 sc); 5236 goto end; 5237 } 5238 5239 /* Read firmware images from the filesystem. */ 5240 if ((error = wpi_read_firmware(sc)) != 0) { 5241 device_printf(sc->sc_dev, 5242 "%s: could not read firmware, error %d\n", __func__, 5243 error); 5244 goto fail; 5245 } 5246 5247 /* Initialize hardware and upload firmware. */ 5248 error = wpi_hw_init(sc); 5249 wpi_unload_firmware(sc); 5250 if (error != 0) { 5251 device_printf(sc->sc_dev, 5252 "%s: could not initialize hardware, error %d\n", __func__, 5253 error); 5254 goto fail; 5255 } 5256 5257 /* Configure adapter now that it is ready. 
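 * txq_active is raised before wpi_config() so that the TX/command path
 * no longer treats the interface as stopped (wpi_stop_locked() is what
 * clears the flag again).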
*/ 5258 sc->txq_active = 1; 5259 if ((error = wpi_config(sc)) != 0) { 5260 device_printf(sc->sc_dev, 5261 "%s: could not configure device, error %d\n", __func__, 5262 error); 5263 goto fail; 5264 } 5265 5266 IF_LOCK(&ifp->if_snd); 5267 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5268 ifp->if_drv_flags |= IFF_DRV_RUNNING; 5269 IF_UNLOCK(&ifp->if_snd); 5270 5271 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5272 5273 WPI_UNLOCK(sc); 5274 5275 ieee80211_start_all(ic); 5276 5277 return; 5278 5279 fail: wpi_stop_locked(sc); 5280 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5281 WPI_UNLOCK(sc); 5282 } 5283 5284 static void 5285 wpi_stop_locked(struct wpi_softc *sc) 5286 { 5287 struct ifnet *ifp = sc->sc_ifp; 5288 5289 WPI_LOCK_ASSERT(sc); 5290 5291 WPI_TXQ_LOCK(sc); 5292 sc->txq_active = 0; 5293 WPI_TXQ_UNLOCK(sc); 5294 5295 WPI_TXQ_STATE_LOCK(sc); 5296 callout_stop(&sc->tx_timeout); 5297 WPI_TXQ_STATE_UNLOCK(sc); 5298 5299 WPI_RXON_LOCK(sc); 5300 callout_stop(&sc->scan_timeout); 5301 callout_stop(&sc->calib_to); 5302 WPI_RXON_UNLOCK(sc); 5303 5304 IF_LOCK(&ifp->if_snd); 5305 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 5306 IF_UNLOCK(&ifp->if_snd); 5307 5308 /* Power OFF hardware. */ 5309 wpi_hw_stop(sc); 5310 } 5311 5312 static void 5313 wpi_stop(struct wpi_softc *sc) 5314 { 5315 WPI_LOCK(sc); 5316 wpi_stop_locked(sc); 5317 WPI_UNLOCK(sc); 5318 } 5319 5320 /* 5321 * Callback from net80211 to start a scan. 5322 */ 5323 static void 5324 wpi_scan_start(struct ieee80211com *ic) 5325 { 5326 struct wpi_softc *sc = ic->ic_ifp->if_softc; 5327 5328 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5329 } 5330 5331 /* 5332 * Callback from net80211 to terminate a scan. 5333 */ 5334 static void 5335 wpi_scan_end(struct ieee80211com *ic) 5336 { 5337 struct ifnet *ifp = ic->ic_ifp; 5338 struct wpi_softc *sc = ifp->if_softc; 5339 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5340 5341 if (vap->iv_state == IEEE80211_S_RUN) 5342 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5343 } 5344 5345 /** 5346 * Called by the net80211 framework to indicate to the driver 5347 * that the channel should be changed 5348 */ 5349 static void 5350 wpi_set_channel(struct ieee80211com *ic) 5351 { 5352 const struct ieee80211_channel *c = ic->ic_curchan; 5353 struct ifnet *ifp = ic->ic_ifp; 5354 struct wpi_softc *sc = ifp->if_softc; 5355 int error; 5356 5357 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5358 5359 WPI_LOCK(sc); 5360 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5361 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5362 WPI_UNLOCK(sc); 5363 WPI_TX_LOCK(sc); 5364 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5365 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5366 WPI_TX_UNLOCK(sc); 5367 5368 /* 5369 * Only need to set the channel in Monitor mode. AP scanning and auth 5370 * are already taken care of by their respective firmware commands. 5371 */ 5372 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5373 WPI_RXON_LOCK(sc); 5374 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5375 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5376 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5377 WPI_RXON_24GHZ); 5378 } else { 5379 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5380 WPI_RXON_24GHZ); 5381 } 5382 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5383 device_printf(sc->sc_dev, 5384 "%s: error %d setting channel\n", __func__, 5385 error); 5386 WPI_RXON_UNLOCK(sc); 5387 } 5388 } 5389 5390 /** 5391 * Called by net80211 to indicate that we need to scan the current 5392 * channel. 
The channel was previously set via the wpi_set_channel
5393 * callback.
5394 */
5395 static void
5396 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5397 {
5398 struct ieee80211vap *vap = ss->ss_vap;
5399 struct ieee80211com *ic = vap->iv_ic;
5400 struct wpi_softc *sc = ic->ic_ifp->if_softc;
5401 int error;
5402
5403 WPI_RXON_LOCK(sc);
5404 if (sc->rxon.chan != ieee80211_chan2ieee(ic, ic->ic_curchan)) {
5405 error = wpi_scan(sc, ic->ic_curchan);
5406 WPI_RXON_UNLOCK(sc);
5407 if (error != 0)
5408 ieee80211_cancel_scan(vap);
5409 } else {
5410 WPI_RXON_UNLOCK(sc);
5411 /* Send probe request when associated. */
5412 sc->sc_scan_curchan(ss, maxdwell);
5413 }
5414 }
5415
5416 /**
5417 * Called by the net80211 framework to indicate that
5418 * the minimum dwell time has been met and the scan can be terminated.
5419 * We don't actually terminate the scan, as the firmware will notify
5420 * us when it's finished and we have no way to interrupt it.
5421 */
5422 static void
5423 wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5424 {
5425 /* NB: don't try to abort scan; wait for firmware to finish */
5426 }
5427
5428 static void
5429 wpi_hw_reset(void *arg, int pending)
5430 {
5431 struct wpi_softc *sc = arg;
5432 struct ifnet *ifp = sc->sc_ifp;
5433 struct ieee80211com *ic = ifp->if_l2com;
5434 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5435
5436 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5437
5438 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN))
5439 ieee80211_cancel_scan(vap);
5440
5441 wpi_stop(sc);
5442 if (vap != NULL)
5443 ieee80211_stop(vap);
5444 wpi_init(sc);
5445 if (vap != NULL)
5446 ieee80211_init(vap);
5447 }
5448