/*-
 * Copyright (c) 2006,2007
 *	Damien Bergamini <damien.bergamini@free.fr>
 *	Benjamin Close <Benjamin.Close@clearchain.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
 *
 * The 3945ABG network adapter doesn't use traditional hardware as
 * many other adapters do.  Instead, at run time the eeprom is set into a
 * known state and told to load the boot firmware.  The boot firmware loads
 * an init and a main binary firmware image into SRAM on the card via DMA.
 * Once the firmware is loaded, the driver and hardware communicate by way
 * of circular DMA rings via the SRAM to the firmware.
 *
 * There are 6 memory rings: 1 command ring, 1 rx data ring and 4 tx data
 * rings.  The 4 tx data rings allow for QoS prioritization.
 *
 * The rx data ring consists of 32 DMA buffers.  Two registers are used to
 * indicate where in the ring the driver and the firmware are up to.  The
 * driver sets the initial read index (reg1) and the initial write index
 * (reg2); the firmware updates the read index (reg1) on rx of a packet and
 * fires an interrupt.  The driver then processes the buffers starting at
 * reg1, indicating to the firmware which buffers have been consumed by
 * updating reg2, and at the same time allocates new memory for each
 * processed buffer.
 *
 * A similar thing happens with the tx rings.  The difference is that the
 * firmware stops processing buffers once the queue is full and resumes only
 * after confirmation of a successful transmission (tx_done) has occurred.
 *
 * The command ring operates in the same manner as the tx queues.
 *
 * All communication directly with the card (i.e. the eeprom) is classed as
 * Stage1 communication.
 *
 * All communication with the card via the firmware is classed as Stage2.
 * The firmware consists of 2 parts: a bootstrap firmware and a runtime
 * firmware.  The bootstrap firmware and runtime firmware are loaded from
 * host memory via DMA to the card and then told to execute.  From this
 * point on the majority of communication between the driver and the card
 * goes via the firmware.
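 *
 * As a concrete illustration of the rx handshake, the driver side of the
 * write-index update can be seen in wpi_update_rx_ring() below, which
 * simply publishes the driver's current position to the firmware:
 *
 *	WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7);
 *
 * i.e. the write pointer handed to the firmware is always rounded down to
 * a multiple of 8 ring entries.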
58 */ 59 60 #include "opt_wlan.h" 61 #include "opt_wpi.h" 62 63 #include <sys/param.h> 64 #include <sys/sysctl.h> 65 #include <sys/sockio.h> 66 #include <sys/mbuf.h> 67 #include <sys/kernel.h> 68 #include <sys/socket.h> 69 #include <sys/systm.h> 70 #include <sys/malloc.h> 71 #include <sys/queue.h> 72 #include <sys/taskqueue.h> 73 #include <sys/module.h> 74 #include <sys/bus.h> 75 #include <sys/endian.h> 76 #include <sys/linker.h> 77 #include <sys/firmware.h> 78 79 #include <machine/bus.h> 80 #include <machine/resource.h> 81 #include <sys/rman.h> 82 83 #include <dev/pci/pcireg.h> 84 #include <dev/pci/pcivar.h> 85 86 #include <net/bpf.h> 87 #include <net/if.h> 88 #include <net/if_var.h> 89 #include <net/if_arp.h> 90 #include <net/ethernet.h> 91 #include <net/if_dl.h> 92 #include <net/if_media.h> 93 #include <net/if_types.h> 94 95 #include <netinet/in.h> 96 #include <netinet/in_systm.h> 97 #include <netinet/in_var.h> 98 #include <netinet/if_ether.h> 99 #include <netinet/ip.h> 100 101 #include <net80211/ieee80211_var.h> 102 #include <net80211/ieee80211_radiotap.h> 103 #include <net80211/ieee80211_regdomain.h> 104 #include <net80211/ieee80211_ratectl.h> 105 106 #include <dev/wpi/if_wpireg.h> 107 #include <dev/wpi/if_wpivar.h> 108 #include <dev/wpi/if_wpi_debug.h> 109 110 struct wpi_ident { 111 uint16_t vendor; 112 uint16_t device; 113 uint16_t subdevice; 114 const char *name; 115 }; 116 117 static const struct wpi_ident wpi_ident_table[] = { 118 /* The below entries support ABG regardless of the subid */ 119 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 120 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 /* The below entries only support BG */ 122 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 123 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0, 0, 0, NULL } 127 }; 128 129 static int wpi_probe(device_t); 130 static int wpi_attach(device_t); 131 static void wpi_radiotap_attach(struct wpi_softc *); 132 static void wpi_sysctlattach(struct wpi_softc *); 133 static void wpi_init_beacon(struct wpi_vap *); 134 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 135 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 136 const uint8_t [IEEE80211_ADDR_LEN], 137 const uint8_t [IEEE80211_ADDR_LEN]); 138 static void wpi_vap_delete(struct ieee80211vap *); 139 static int wpi_detach(device_t); 140 static int wpi_shutdown(device_t); 141 static int wpi_suspend(device_t); 142 static int wpi_resume(device_t); 143 static int wpi_nic_lock(struct wpi_softc *); 144 static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 145 static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 146 static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 147 void **, bus_size_t, bus_size_t); 148 static void wpi_dma_contig_free(struct wpi_dma_info *); 149 static int wpi_alloc_shared(struct wpi_softc *); 150 static void wpi_free_shared(struct wpi_softc *); 151 static int wpi_alloc_fwmem(struct wpi_softc *); 152 static void wpi_free_fwmem(struct wpi_softc *); 153 static int wpi_alloc_rx_ring(struct wpi_softc *); 154 static void wpi_update_rx_ring(struct wpi_softc *); 155 static void wpi_update_rx_ring_ps(struct wpi_softc *); 156 static void wpi_reset_rx_ring(struct wpi_softc *); 157 static void wpi_free_rx_ring(struct wpi_softc *); 158 static int wpi_alloc_tx_ring(struct wpi_softc *, 
struct wpi_tx_ring *, 159 int); 160 static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161 static void wpi_update_tx_ring_ps(struct wpi_softc *, 162 struct wpi_tx_ring *); 163 static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 164 static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 165 static int wpi_read_eeprom(struct wpi_softc *, 166 uint8_t macaddr[IEEE80211_ADDR_LEN]); 167 static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 168 static void wpi_read_eeprom_band(struct wpi_softc *, int); 169 static int wpi_read_eeprom_channels(struct wpi_softc *, int); 170 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 171 struct ieee80211_channel *); 172 static int wpi_setregdomain(struct ieee80211com *, 173 struct ieee80211_regdomain *, int, 174 struct ieee80211_channel[]); 175 static int wpi_read_eeprom_group(struct wpi_softc *, int); 176 static int wpi_add_node_entry_adhoc(struct wpi_softc *); 177 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 178 const uint8_t mac[IEEE80211_ADDR_LEN]); 179 static void wpi_node_free(struct ieee80211_node *); 180 static void wpi_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, 181 const struct ieee80211_rx_stats *, 182 int, int); 183 static void wpi_restore_node(void *, struct ieee80211_node *); 184 static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *); 185 static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 186 static void wpi_calib_timeout(void *); 187 static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 188 struct wpi_rx_data *); 189 static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 190 struct wpi_rx_data *); 191 static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 192 static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 193 static void wpi_notif_intr(struct wpi_softc *); 194 static void wpi_wakeup_intr(struct wpi_softc *); 195 #ifdef WPI_DEBUG 196 static void wpi_debug_registers(struct wpi_softc *); 197 #endif 198 static void wpi_fatal_intr(struct wpi_softc *); 199 static void wpi_intr(void *); 200 static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 201 static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 202 struct ieee80211_node *); 203 static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 204 struct ieee80211_node *, 205 const struct ieee80211_bpf_params *); 206 static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 207 const struct ieee80211_bpf_params *); 208 static void wpi_start(struct ifnet *); 209 static void wpi_start_task(void *, int); 210 static void wpi_watchdog_rfkill(void *); 211 static void wpi_scan_timeout(void *); 212 static void wpi_tx_timeout(void *); 213 static int wpi_ioctl(struct ifnet *, u_long, caddr_t); 214 static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 215 static int wpi_mrr_setup(struct wpi_softc *); 216 static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 217 static int wpi_add_broadcast_node(struct wpi_softc *, int); 218 static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 219 static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 220 static int wpi_updateedca(struct ieee80211com *); 221 static void wpi_set_promisc(struct wpi_softc *); 222 static void wpi_update_promisc(struct ieee80211com *); 223 static void wpi_update_mcast(struct ieee80211com *); 224 static void wpi_set_led(struct 
wpi_softc *, uint8_t, uint8_t, uint8_t); 225 static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 226 static void wpi_power_calibration(struct wpi_softc *); 227 static int wpi_set_txpower(struct wpi_softc *, int); 228 static int wpi_get_power_index(struct wpi_softc *, 229 struct wpi_power_group *, uint8_t, int, int); 230 static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 231 static int wpi_send_btcoex(struct wpi_softc *); 232 static int wpi_send_rxon(struct wpi_softc *, int, int); 233 static int wpi_config(struct wpi_softc *); 234 static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 235 struct ieee80211_channel *, uint8_t); 236 static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 237 static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 238 struct ieee80211_channel *); 239 static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t); 240 static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 241 static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 242 static int wpi_config_beacon(struct wpi_vap *); 243 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 244 static void wpi_update_beacon(struct ieee80211vap *, int); 245 static void wpi_newassoc(struct ieee80211_node *, int); 246 static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 247 static int wpi_load_key(struct ieee80211_node *, 248 const struct ieee80211_key *); 249 static void wpi_load_key_cb(void *, struct ieee80211_node *); 250 static int wpi_set_global_keys(struct ieee80211_node *); 251 static int wpi_del_key(struct ieee80211_node *, 252 const struct ieee80211_key *); 253 static void wpi_del_key_cb(void *, struct ieee80211_node *); 254 static int wpi_process_key(struct ieee80211vap *, 255 const struct ieee80211_key *, int); 256 static int wpi_key_set(struct ieee80211vap *, 257 const struct ieee80211_key *, 258 const uint8_t mac[IEEE80211_ADDR_LEN]); 259 static int wpi_key_delete(struct ieee80211vap *, 260 const struct ieee80211_key *); 261 static int wpi_post_alive(struct wpi_softc *); 262 static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 263 static int wpi_load_firmware(struct wpi_softc *); 264 static int wpi_read_firmware(struct wpi_softc *); 265 static void wpi_unload_firmware(struct wpi_softc *); 266 static int wpi_clock_wait(struct wpi_softc *); 267 static int wpi_apm_init(struct wpi_softc *); 268 static void wpi_apm_stop_master(struct wpi_softc *); 269 static void wpi_apm_stop(struct wpi_softc *); 270 static void wpi_nic_config(struct wpi_softc *); 271 static int wpi_hw_init(struct wpi_softc *); 272 static void wpi_hw_stop(struct wpi_softc *); 273 static void wpi_radio_on(void *, int); 274 static void wpi_radio_off(void *, int); 275 static void wpi_init(void *); 276 static void wpi_stop_locked(struct wpi_softc *); 277 static void wpi_stop(struct wpi_softc *); 278 static void wpi_scan_start(struct ieee80211com *); 279 static void wpi_scan_end(struct ieee80211com *); 280 static void wpi_set_channel(struct ieee80211com *); 281 static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 282 static void wpi_scan_mindwell(struct ieee80211_scan_state *); 283 static void wpi_hw_reset(void *, int); 284 285 static device_method_t wpi_methods[] = { 286 /* Device interface */ 287 DEVMETHOD(device_probe, wpi_probe), 288 DEVMETHOD(device_attach, wpi_attach), 289 DEVMETHOD(device_detach, wpi_detach), 290 DEVMETHOD(device_shutdown, wpi_shutdown), 291 DEVMETHOD(device_suspend, 
wpi_suspend), 292 DEVMETHOD(device_resume, wpi_resume), 293 294 DEVMETHOD_END 295 }; 296 297 static driver_t wpi_driver = { 298 "wpi", 299 wpi_methods, 300 sizeof (struct wpi_softc) 301 }; 302 static devclass_t wpi_devclass; 303 304 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 305 306 MODULE_VERSION(wpi, 1); 307 308 MODULE_DEPEND(wpi, pci, 1, 1, 1); 309 MODULE_DEPEND(wpi, wlan, 1, 1, 1); 310 MODULE_DEPEND(wpi, firmware, 1, 1, 1); 311 312 static int 313 wpi_probe(device_t dev) 314 { 315 const struct wpi_ident *ident; 316 317 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 318 if (pci_get_vendor(dev) == ident->vendor && 319 pci_get_device(dev) == ident->device) { 320 device_set_desc(dev, ident->name); 321 return (BUS_PROBE_DEFAULT); 322 } 323 } 324 return ENXIO; 325 } 326 327 static int 328 wpi_attach(device_t dev) 329 { 330 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 331 struct ieee80211com *ic; 332 struct ifnet *ifp; 333 int i, error, rid; 334 #ifdef WPI_DEBUG 335 int supportsa = 1; 336 const struct wpi_ident *ident; 337 #endif 338 uint8_t macaddr[IEEE80211_ADDR_LEN]; 339 340 sc->sc_dev = dev; 341 342 #ifdef WPI_DEBUG 343 error = resource_int_value(device_get_name(sc->sc_dev), 344 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 345 if (error != 0) 346 sc->sc_debug = 0; 347 #else 348 sc->sc_debug = 0; 349 #endif 350 351 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 352 353 /* 354 * Get the offset of the PCI Express Capability Structure in PCI 355 * Configuration Space. 356 */ 357 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 358 if (error != 0) { 359 device_printf(dev, "PCIe capability structure not found!\n"); 360 return error; 361 } 362 363 /* 364 * Some card's only support 802.11b/g not a, check to see if 365 * this is one such card. A 0x0 in the subdevice table indicates 366 * the entire subdevice range is to be ignored. 367 */ 368 #ifdef WPI_DEBUG 369 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 370 if (ident->subdevice && 371 pci_get_subdevice(dev) == ident->subdevice) { 372 supportsa = 0; 373 break; 374 } 375 } 376 #endif 377 378 /* Clear device-specific "PCI retry timeout" register (41h). */ 379 pci_write_config(dev, 0x41, 0, 1); 380 381 /* Enable bus-mastering. */ 382 pci_enable_busmaster(dev); 383 384 rid = PCIR_BAR(0); 385 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 386 RF_ACTIVE); 387 if (sc->mem == NULL) { 388 device_printf(dev, "can't map mem space\n"); 389 return ENOMEM; 390 } 391 sc->sc_st = rman_get_bustag(sc->mem); 392 sc->sc_sh = rman_get_bushandle(sc->mem); 393 394 i = 1; 395 rid = 0; 396 if (pci_alloc_msi(dev, &i) == 0) 397 rid = 1; 398 /* Install interrupt handler. */ 399 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 400 (rid != 0 ? 0 : RF_SHAREABLE)); 401 if (sc->irq == NULL) { 402 device_printf(dev, "can't map interrupt\n"); 403 error = ENOMEM; 404 goto fail; 405 } 406 407 WPI_LOCK_INIT(sc); 408 WPI_TX_LOCK_INIT(sc); 409 WPI_RXON_LOCK_INIT(sc); 410 WPI_NT_LOCK_INIT(sc); 411 WPI_TXQ_LOCK_INIT(sc); 412 WPI_TXQ_STATE_LOCK_INIT(sc); 413 414 /* Allocate DMA memory for firmware transfers. */ 415 if ((error = wpi_alloc_fwmem(sc)) != 0) { 416 device_printf(dev, 417 "could not allocate memory for firmware, error %d\n", 418 error); 419 goto fail; 420 } 421 422 /* Allocate shared page. 
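   The shared area is later loaded with the physical base addresses of the
   TX rings (sc->shared->txbase[]), which is how the firmware locates them
   (see wpi_alloc_tx_ring() below).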
*/ 423 if ((error = wpi_alloc_shared(sc)) != 0) { 424 device_printf(dev, "could not allocate shared page\n"); 425 goto fail; 426 } 427 428 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 429 for (i = 0; i < WPI_NTXQUEUES; i++) { 430 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 431 device_printf(dev, 432 "could not allocate TX ring %d, error %d\n", i, 433 error); 434 goto fail; 435 } 436 } 437 438 /* Allocate RX ring. */ 439 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 440 device_printf(dev, "could not allocate RX ring, error %d\n", 441 error); 442 goto fail; 443 } 444 445 /* Clear pending interrupts. */ 446 WPI_WRITE(sc, WPI_INT, 0xffffffff); 447 448 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 449 if (ifp == NULL) { 450 device_printf(dev, "can not allocate ifnet structure\n"); 451 goto fail; 452 } 453 454 ic = ifp->if_l2com; 455 ic->ic_ifp = ifp; 456 ic->ic_softc = sc; 457 ic->ic_name = device_get_nameunit(dev); 458 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 459 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 460 461 /* Set device capabilities. */ 462 ic->ic_caps = 463 IEEE80211_C_STA /* station mode supported */ 464 | IEEE80211_C_IBSS /* IBSS mode supported */ 465 | IEEE80211_C_HOSTAP /* Host access point mode */ 466 | IEEE80211_C_MONITOR /* monitor mode supported */ 467 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 468 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 469 | IEEE80211_C_TXPMGT /* tx power management */ 470 | IEEE80211_C_SHSLOT /* short slot time supported */ 471 | IEEE80211_C_WPA /* 802.11i */ 472 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 473 | IEEE80211_C_WME /* 802.11e */ 474 | IEEE80211_C_PMGT /* Station-side power mgmt */ 475 ; 476 477 ic->ic_cryptocaps = 478 IEEE80211_CRYPTO_AES_CCM; 479 480 /* 481 * Read in the eeprom and also setup the channels for 482 * net80211. We don't set the rates as net80211 does this for us 483 */ 484 if ((error = wpi_read_eeprom(sc, macaddr)) != 0) { 485 device_printf(dev, "could not read EEPROM, error %d\n", 486 error); 487 goto fail; 488 } 489 490 #ifdef WPI_DEBUG 491 if (bootverbose) { 492 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 493 sc->domain); 494 device_printf(sc->sc_dev, "Hardware Type: %c\n", 495 sc->type > 1 ? 'B': '?'); 496 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 497 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 498 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 499 supportsa ? "does" : "does not"); 500 501 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 502 check what sc->rev really represents - benjsc 20070615 */ 503 } 504 #endif 505 506 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 507 ifp->if_softc = sc; 508 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 509 ifp->if_init = wpi_init; 510 ifp->if_ioctl = wpi_ioctl; 511 ifp->if_start = wpi_start; 512 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 513 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 514 IFQ_SET_READY(&ifp->if_snd); 515 516 ieee80211_ifattach(ic, macaddr); 517 ic->ic_vap_create = wpi_vap_create; 518 ic->ic_vap_delete = wpi_vap_delete; 519 ic->ic_raw_xmit = wpi_raw_xmit; 520 ic->ic_node_alloc = wpi_node_alloc; 521 sc->sc_node_free = ic->ic_node_free; 522 ic->ic_node_free = wpi_node_free; 523 ic->ic_wme.wme_update = wpi_updateedca; 524 ic->ic_update_promisc = wpi_update_promisc; 525 ic->ic_update_mcast = wpi_update_mcast; 526 ic->ic_newassoc = wpi_newassoc; 527 ic->ic_scan_start = wpi_scan_start; 528 ic->ic_scan_end = wpi_scan_end; 529 ic->ic_set_channel = wpi_set_channel; 530 ic->ic_scan_curchan = wpi_scan_curchan; 531 ic->ic_scan_mindwell = wpi_scan_mindwell; 532 ic->ic_setregdomain = wpi_setregdomain; 533 534 sc->sc_update_rx_ring = wpi_update_rx_ring; 535 sc->sc_update_tx_ring = wpi_update_tx_ring; 536 537 wpi_radiotap_attach(sc); 538 539 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 540 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 541 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 542 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 543 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 544 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 545 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 546 TASK_INIT(&sc->sc_start_task, 0, wpi_start_task, sc); 547 548 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 549 taskqueue_thread_enqueue, &sc->sc_tq); 550 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 551 if (error != 0) { 552 device_printf(dev, "can't start threads, error %d\n", error); 553 goto fail; 554 } 555 556 wpi_sysctlattach(sc); 557 558 /* 559 * Hook our interrupt after all initialization is complete. 560 */ 561 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 562 NULL, wpi_intr, sc, &sc->sc_ih); 563 if (error != 0) { 564 device_printf(dev, "can't establish interrupt, error %d\n", 565 error); 566 goto fail; 567 } 568 569 if (bootverbose) 570 ieee80211_announce(ic); 571 572 #ifdef WPI_DEBUG 573 if (sc->sc_debug & WPI_DEBUG_HW) 574 ieee80211_announce_channels(ic); 575 #endif 576 577 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 578 return 0; 579 580 fail: wpi_detach(dev); 581 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 582 return error; 583 } 584 585 /* 586 * Attach the interface to 802.11 radiotap. 
587 */ 588 static void 589 wpi_radiotap_attach(struct wpi_softc *sc) 590 { 591 struct ifnet *ifp = sc->sc_ifp; 592 struct ieee80211com *ic = ifp->if_l2com; 593 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 594 ieee80211_radiotap_attach(ic, 595 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 596 WPI_TX_RADIOTAP_PRESENT, 597 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 598 WPI_RX_RADIOTAP_PRESENT); 599 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 600 } 601 602 static void 603 wpi_sysctlattach(struct wpi_softc *sc) 604 { 605 #ifdef WPI_DEBUG 606 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 607 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 608 609 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 610 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 611 "control debugging printfs"); 612 #endif 613 } 614 615 static void 616 wpi_init_beacon(struct wpi_vap *wvp) 617 { 618 struct wpi_buf *bcn = &wvp->wv_bcbuf; 619 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 620 621 cmd->id = WPI_ID_BROADCAST; 622 cmd->ofdm_mask = 0xff; 623 cmd->cck_mask = 0x0f; 624 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 625 626 /* 627 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue 628 * XXX by using WPI_TX_NEED_ACK instead (with some side effects). 629 */ 630 cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP); 631 632 bcn->code = WPI_CMD_SET_BEACON; 633 bcn->ac = WPI_CMD_QUEUE_NUM; 634 bcn->size = sizeof(struct wpi_cmd_beacon); 635 } 636 637 static struct ieee80211vap * 638 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 639 enum ieee80211_opmode opmode, int flags, 640 const uint8_t bssid[IEEE80211_ADDR_LEN], 641 const uint8_t mac[IEEE80211_ADDR_LEN]) 642 { 643 struct wpi_vap *wvp; 644 struct ieee80211vap *vap; 645 646 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 647 return NULL; 648 649 wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap), 650 M_80211_VAP, M_NOWAIT | M_ZERO); 651 if (wvp == NULL) 652 return NULL; 653 vap = &wvp->wv_vap; 654 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); 655 656 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 657 WPI_VAP_LOCK_INIT(wvp); 658 wpi_init_beacon(wvp); 659 } 660 661 /* Override with driver methods. */ 662 vap->iv_key_set = wpi_key_set; 663 vap->iv_key_delete = wpi_key_delete; 664 wvp->wv_recv_mgmt = vap->iv_recv_mgmt; 665 vap->iv_recv_mgmt = wpi_recv_mgmt; 666 wvp->wv_newstate = vap->iv_newstate; 667 vap->iv_newstate = wpi_newstate; 668 vap->iv_update_beacon = wpi_update_beacon; 669 vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1; 670 671 ieee80211_ratectl_init(vap); 672 /* Complete setup. 
*/ 673 ieee80211_vap_attach(vap, ieee80211_media_change, 674 ieee80211_media_status); 675 ic->ic_opmode = opmode; 676 return vap; 677 } 678 679 static void 680 wpi_vap_delete(struct ieee80211vap *vap) 681 { 682 struct wpi_vap *wvp = WPI_VAP(vap); 683 struct wpi_buf *bcn = &wvp->wv_bcbuf; 684 enum ieee80211_opmode opmode = vap->iv_opmode; 685 686 ieee80211_ratectl_deinit(vap); 687 ieee80211_vap_detach(vap); 688 689 if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { 690 if (bcn->m != NULL) 691 m_freem(bcn->m); 692 693 WPI_VAP_LOCK_DESTROY(wvp); 694 } 695 696 free(wvp, M_80211_VAP); 697 } 698 699 static int 700 wpi_detach(device_t dev) 701 { 702 struct wpi_softc *sc = device_get_softc(dev); 703 struct ifnet *ifp = sc->sc_ifp; 704 struct ieee80211com *ic; 705 int qid; 706 707 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 708 709 if (ifp != NULL) { 710 ic = ifp->if_l2com; 711 712 ieee80211_draintask(ic, &sc->sc_radioon_task); 713 ieee80211_draintask(ic, &sc->sc_start_task); 714 715 wpi_stop(sc); 716 717 taskqueue_drain_all(sc->sc_tq); 718 taskqueue_free(sc->sc_tq); 719 720 callout_drain(&sc->watchdog_rfkill); 721 callout_drain(&sc->tx_timeout); 722 callout_drain(&sc->scan_timeout); 723 callout_drain(&sc->calib_to); 724 ieee80211_ifdetach(ic); 725 } 726 727 /* Uninstall interrupt handler. */ 728 if (sc->irq != NULL) { 729 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 730 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 731 sc->irq); 732 pci_release_msi(dev); 733 } 734 735 if (sc->txq[0].data_dmat) { 736 /* Free DMA resources. */ 737 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 738 wpi_free_tx_ring(sc, &sc->txq[qid]); 739 740 wpi_free_rx_ring(sc); 741 wpi_free_shared(sc); 742 } 743 744 if (sc->fw_dma.tag) 745 wpi_free_fwmem(sc); 746 747 if (sc->mem != NULL) 748 bus_release_resource(dev, SYS_RES_MEMORY, 749 rman_get_rid(sc->mem), sc->mem); 750 751 if (ifp != NULL) 752 if_free(ifp); 753 754 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 755 WPI_TXQ_STATE_LOCK_DESTROY(sc); 756 WPI_TXQ_LOCK_DESTROY(sc); 757 WPI_NT_LOCK_DESTROY(sc); 758 WPI_RXON_LOCK_DESTROY(sc); 759 WPI_TX_LOCK_DESTROY(sc); 760 WPI_LOCK_DESTROY(sc); 761 return 0; 762 } 763 764 static int 765 wpi_shutdown(device_t dev) 766 { 767 struct wpi_softc *sc = device_get_softc(dev); 768 769 wpi_stop(sc); 770 return 0; 771 } 772 773 static int 774 wpi_suspend(device_t dev) 775 { 776 struct wpi_softc *sc = device_get_softc(dev); 777 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 778 779 ieee80211_suspend_all(ic); 780 return 0; 781 } 782 783 static int 784 wpi_resume(device_t dev) 785 { 786 struct wpi_softc *sc = device_get_softc(dev); 787 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 788 789 /* Clear device-specific "PCI retry timeout" register (41h). */ 790 pci_write_config(dev, 0x41, 0, 1); 791 792 ieee80211_resume_all(ic); 793 return 0; 794 } 795 796 /* 797 * Grab exclusive access to NIC memory. 798 */ 799 static int 800 wpi_nic_lock(struct wpi_softc *sc) 801 { 802 int ntries; 803 804 /* Request exclusive access to NIC. */ 805 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 806 807 /* Spin until we actually get the lock. */ 808 for (ntries = 0; ntries < 1000; ntries++) { 809 if ((WPI_READ(sc, WPI_GP_CNTRL) & 810 (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) == 811 WPI_GP_CNTRL_MAC_ACCESS_ENA) 812 return 0; 813 DELAY(10); 814 } 815 816 device_printf(sc->sc_dev, "could not lock memory\n"); 817 818 return ETIMEDOUT; 819 } 820 821 /* 822 * Release lock on NIC memory. 
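 * This is the counterpart of wpi_nic_lock(): it simply clears the
 * MAC_ACCESS_REQ bit in the GP_CNTRL register.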
823 */ 824 static __inline void 825 wpi_nic_unlock(struct wpi_softc *sc) 826 { 827 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 828 } 829 830 static __inline uint32_t 831 wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 832 { 833 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 834 WPI_BARRIER_READ_WRITE(sc); 835 return WPI_READ(sc, WPI_PRPH_RDATA); 836 } 837 838 static __inline void 839 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 840 { 841 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 842 WPI_BARRIER_WRITE(sc); 843 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 844 } 845 846 static __inline void 847 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 848 { 849 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 850 } 851 852 static __inline void 853 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 854 { 855 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 856 } 857 858 static __inline void 859 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 860 const uint32_t *data, int count) 861 { 862 for (; count > 0; count--, data++, addr += 4) 863 wpi_prph_write(sc, addr, *data); 864 } 865 866 static __inline uint32_t 867 wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 868 { 869 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 870 WPI_BARRIER_READ_WRITE(sc); 871 return WPI_READ(sc, WPI_MEM_RDATA); 872 } 873 874 static __inline void 875 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 876 int count) 877 { 878 for (; count > 0; count--, addr += 4) 879 *data++ = wpi_mem_read(sc, addr); 880 } 881 882 static int 883 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 884 { 885 uint8_t *out = data; 886 uint32_t val; 887 int error, ntries; 888 889 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 890 891 if ((error = wpi_nic_lock(sc)) != 0) 892 return error; 893 894 for (; count > 0; count -= 2, addr++) { 895 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 896 for (ntries = 0; ntries < 10; ntries++) { 897 val = WPI_READ(sc, WPI_EEPROM); 898 if (val & WPI_EEPROM_READ_VALID) 899 break; 900 DELAY(5); 901 } 902 if (ntries == 10) { 903 device_printf(sc->sc_dev, 904 "timeout reading ROM at 0x%x\n", addr); 905 return ETIMEDOUT; 906 } 907 *out++= val >> 16; 908 if (count > 1) 909 *out ++= val >> 24; 910 } 911 912 wpi_nic_unlock(sc); 913 914 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 915 916 return 0; 917 } 918 919 static void 920 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 921 { 922 if (error != 0) 923 return; 924 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 925 *(bus_addr_t *)arg = segs[0].ds_addr; 926 } 927 928 /* 929 * Allocates a contiguous block of dma memory of the requested size and 930 * alignment. 
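 * This follows the usual three-step busdma sequence: create a tag with the
 * requested alignment, allocate zeroed coherent memory, then load the map
 * with wpi_dma_map_addr() as the callback that records the single
 * segment's bus address in dma->paddr.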
931 */ 932 static int 933 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 934 void **kvap, bus_size_t size, bus_size_t alignment) 935 { 936 int error; 937 938 dma->tag = NULL; 939 dma->size = size; 940 941 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 942 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 943 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 944 if (error != 0) 945 goto fail; 946 947 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 948 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 949 if (error != 0) 950 goto fail; 951 952 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 953 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 954 if (error != 0) 955 goto fail; 956 957 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 958 959 if (kvap != NULL) 960 *kvap = dma->vaddr; 961 962 return 0; 963 964 fail: wpi_dma_contig_free(dma); 965 return error; 966 } 967 968 static void 969 wpi_dma_contig_free(struct wpi_dma_info *dma) 970 { 971 if (dma->vaddr != NULL) { 972 bus_dmamap_sync(dma->tag, dma->map, 973 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 974 bus_dmamap_unload(dma->tag, dma->map); 975 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 976 dma->vaddr = NULL; 977 } 978 if (dma->tag != NULL) { 979 bus_dma_tag_destroy(dma->tag); 980 dma->tag = NULL; 981 } 982 } 983 984 /* 985 * Allocate a shared page between host and NIC. 986 */ 987 static int 988 wpi_alloc_shared(struct wpi_softc *sc) 989 { 990 /* Shared buffer must be aligned on a 4KB boundary. */ 991 return wpi_dma_contig_alloc(sc, &sc->shared_dma, 992 (void **)&sc->shared, sizeof (struct wpi_shared), 4096); 993 } 994 995 static void 996 wpi_free_shared(struct wpi_softc *sc) 997 { 998 wpi_dma_contig_free(&sc->shared_dma); 999 } 1000 1001 /* 1002 * Allocate DMA-safe memory for firmware transfer. 1003 */ 1004 static int 1005 wpi_alloc_fwmem(struct wpi_softc *sc) 1006 { 1007 /* Must be aligned on a 16-byte boundary. */ 1008 return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, 1009 WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16); 1010 } 1011 1012 static void 1013 wpi_free_fwmem(struct wpi_softc *sc) 1014 { 1015 wpi_dma_contig_free(&sc->fw_dma); 1016 } 1017 1018 static int 1019 wpi_alloc_rx_ring(struct wpi_softc *sc) 1020 { 1021 struct wpi_rx_ring *ring = &sc->rxq; 1022 bus_size_t size; 1023 int i, error; 1024 1025 ring->cur = 0; 1026 ring->update = 0; 1027 1028 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1029 1030 /* Allocate RX descriptors (16KB aligned.) */ 1031 size = WPI_RX_RING_COUNT * sizeof (uint32_t); 1032 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, 1033 (void **)&ring->desc, size, WPI_RING_DMA_ALIGN); 1034 if (error != 0) { 1035 device_printf(sc->sc_dev, 1036 "%s: could not allocate RX ring DMA memory, error %d\n", 1037 __func__, error); 1038 goto fail; 1039 } 1040 1041 /* Create RX buffer DMA tag. */ 1042 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1043 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1044 MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, 1045 &ring->data_dmat); 1046 if (error != 0) { 1047 device_printf(sc->sc_dev, 1048 "%s: could not create RX buf DMA tag, error %d\n", 1049 __func__, error); 1050 goto fail; 1051 } 1052 1053 /* 1054 * Allocate and map RX buffers. 
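 * Each of the WPI_RX_RING_COUNT slots gets its own DMA map and a
 * page-sized (MJUMPAGESIZE) mbuf cluster; the cluster's bus address is
 * stored little-endian in ring->desc[i] so the firmware knows where to
 * DMA received frames.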
1055 */ 1056 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1057 struct wpi_rx_data *data = &ring->data[i]; 1058 bus_addr_t paddr; 1059 1060 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1061 if (error != 0) { 1062 device_printf(sc->sc_dev, 1063 "%s: could not create RX buf DMA map, error %d\n", 1064 __func__, error); 1065 goto fail; 1066 } 1067 1068 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1069 if (data->m == NULL) { 1070 device_printf(sc->sc_dev, 1071 "%s: could not allocate RX mbuf\n", __func__); 1072 error = ENOBUFS; 1073 goto fail; 1074 } 1075 1076 error = bus_dmamap_load(ring->data_dmat, data->map, 1077 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 1078 &paddr, BUS_DMA_NOWAIT); 1079 if (error != 0 && error != EFBIG) { 1080 device_printf(sc->sc_dev, 1081 "%s: can't map mbuf (error %d)\n", __func__, 1082 error); 1083 goto fail; 1084 } 1085 1086 /* Set physical address of RX buffer. */ 1087 ring->desc[i] = htole32(paddr); 1088 } 1089 1090 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1091 BUS_DMASYNC_PREWRITE); 1092 1093 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1094 1095 return 0; 1096 1097 fail: wpi_free_rx_ring(sc); 1098 1099 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1100 1101 return error; 1102 } 1103 1104 static void 1105 wpi_update_rx_ring(struct wpi_softc *sc) 1106 { 1107 WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7); 1108 } 1109 1110 static void 1111 wpi_update_rx_ring_ps(struct wpi_softc *sc) 1112 { 1113 struct wpi_rx_ring *ring = &sc->rxq; 1114 1115 if (ring->update != 0) { 1116 /* Wait for INT_WAKEUP event. */ 1117 return; 1118 } 1119 1120 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1121 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1122 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1123 __func__); 1124 ring->update = 1; 1125 } else { 1126 wpi_update_rx_ring(sc); 1127 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1128 } 1129 } 1130 1131 static void 1132 wpi_reset_rx_ring(struct wpi_softc *sc) 1133 { 1134 struct wpi_rx_ring *ring = &sc->rxq; 1135 int ntries; 1136 1137 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1138 1139 if (wpi_nic_lock(sc) == 0) { 1140 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1141 for (ntries = 0; ntries < 1000; ntries++) { 1142 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1143 WPI_FH_RX_STATUS_IDLE) 1144 break; 1145 DELAY(10); 1146 } 1147 wpi_nic_unlock(sc); 1148 } 1149 1150 ring->cur = 0; 1151 ring->update = 0; 1152 } 1153 1154 static void 1155 wpi_free_rx_ring(struct wpi_softc *sc) 1156 { 1157 struct wpi_rx_ring *ring = &sc->rxq; 1158 int i; 1159 1160 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1161 1162 wpi_dma_contig_free(&ring->desc_dma); 1163 1164 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1165 struct wpi_rx_data *data = &ring->data[i]; 1166 1167 if (data->m != NULL) { 1168 bus_dmamap_sync(ring->data_dmat, data->map, 1169 BUS_DMASYNC_POSTREAD); 1170 bus_dmamap_unload(ring->data_dmat, data->map); 1171 m_freem(data->m); 1172 data->m = NULL; 1173 } 1174 if (data->map != NULL) 1175 bus_dmamap_destroy(ring->data_dmat, data->map); 1176 } 1177 if (ring->data_dmat != NULL) { 1178 bus_dma_tag_destroy(ring->data_dmat); 1179 ring->data_dmat = NULL; 1180 } 1181 } 1182 1183 static int 1184 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1185 { 1186 bus_addr_t paddr; 1187 bus_size_t size; 1188 int i, error; 1189 1190 ring->qid = qid; 1191 ring->queued = 0; 1192 ring->cur = 0; 1193 ring->update = 0; 
1194 1195 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1196 1197 /* Allocate TX descriptors (16KB aligned.) */ 1198 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1199 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1200 size, WPI_RING_DMA_ALIGN); 1201 if (error != 0) { 1202 device_printf(sc->sc_dev, 1203 "%s: could not allocate TX ring DMA memory, error %d\n", 1204 __func__, error); 1205 goto fail; 1206 } 1207 1208 /* Update shared area with ring physical address. */ 1209 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1210 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1211 BUS_DMASYNC_PREWRITE); 1212 1213 /* 1214 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1215 * to allocate commands space for other rings. 1216 * XXX Do we really need to allocate descriptors for other rings? 1217 */ 1218 if (qid > WPI_CMD_QUEUE_NUM) { 1219 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1220 return 0; 1221 } 1222 1223 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1224 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1225 size, 4); 1226 if (error != 0) { 1227 device_printf(sc->sc_dev, 1228 "%s: could not allocate TX cmd DMA memory, error %d\n", 1229 __func__, error); 1230 goto fail; 1231 } 1232 1233 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1234 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1235 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1236 &ring->data_dmat); 1237 if (error != 0) { 1238 device_printf(sc->sc_dev, 1239 "%s: could not create TX buf DMA tag, error %d\n", 1240 __func__, error); 1241 goto fail; 1242 } 1243 1244 paddr = ring->cmd_dma.paddr; 1245 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1246 struct wpi_tx_data *data = &ring->data[i]; 1247 1248 data->cmd_paddr = paddr; 1249 paddr += sizeof (struct wpi_tx_cmd); 1250 1251 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1252 if (error != 0) { 1253 device_printf(sc->sc_dev, 1254 "%s: could not create TX buf DMA map, error %d\n", 1255 __func__, error); 1256 goto fail; 1257 } 1258 } 1259 1260 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1261 1262 return 0; 1263 1264 fail: wpi_free_tx_ring(sc, ring); 1265 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1266 return error; 1267 } 1268 1269 static void 1270 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1271 { 1272 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1273 } 1274 1275 static void 1276 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1277 { 1278 1279 if (ring->update != 0) { 1280 /* Wait for INT_WAKEUP event. 
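		   A wakeup request is already outstanding; the deferred
		   write-pointer update is presumably flushed from
		   wpi_wakeup_intr() once the NIC reports that it is awake.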
*/ 1281 return; 1282 } 1283 1284 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1285 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) { 1286 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1287 __func__, ring->qid); 1288 ring->update = 1; 1289 } else { 1290 wpi_update_tx_ring(sc, ring); 1291 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1292 } 1293 } 1294 1295 static void 1296 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1297 { 1298 int i; 1299 1300 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1301 1302 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1303 struct wpi_tx_data *data = &ring->data[i]; 1304 1305 if (data->m != NULL) { 1306 bus_dmamap_sync(ring->data_dmat, data->map, 1307 BUS_DMASYNC_POSTWRITE); 1308 bus_dmamap_unload(ring->data_dmat, data->map); 1309 m_freem(data->m); 1310 data->m = NULL; 1311 } 1312 if (data->ni != NULL) { 1313 ieee80211_free_node(data->ni); 1314 data->ni = NULL; 1315 } 1316 } 1317 /* Clear TX descriptors. */ 1318 memset(ring->desc, 0, ring->desc_dma.size); 1319 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1320 BUS_DMASYNC_PREWRITE); 1321 sc->qfullmsk &= ~(1 << ring->qid); 1322 ring->queued = 0; 1323 ring->cur = 0; 1324 ring->update = 0; 1325 } 1326 1327 static void 1328 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1329 { 1330 int i; 1331 1332 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1333 1334 wpi_dma_contig_free(&ring->desc_dma); 1335 wpi_dma_contig_free(&ring->cmd_dma); 1336 1337 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1338 struct wpi_tx_data *data = &ring->data[i]; 1339 1340 if (data->m != NULL) { 1341 bus_dmamap_sync(ring->data_dmat, data->map, 1342 BUS_DMASYNC_POSTWRITE); 1343 bus_dmamap_unload(ring->data_dmat, data->map); 1344 m_freem(data->m); 1345 } 1346 if (data->map != NULL) 1347 bus_dmamap_destroy(ring->data_dmat, data->map); 1348 } 1349 if (ring->data_dmat != NULL) { 1350 bus_dma_tag_destroy(ring->data_dmat); 1351 ring->data_dmat = NULL; 1352 } 1353 } 1354 1355 /* 1356 * Extract various information from EEPROM. 1357 */ 1358 static int 1359 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1360 { 1361 #define WPI_CHK(res) do { \ 1362 if ((error = res) != 0) \ 1363 goto fail; \ 1364 } while (0) 1365 int error, i; 1366 1367 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1368 1369 /* Adapter has to be powered on for EEPROM access to work. */ 1370 if ((error = wpi_apm_init(sc)) != 0) { 1371 device_printf(sc->sc_dev, 1372 "%s: could not power ON adapter, error %d\n", __func__, 1373 error); 1374 return error; 1375 } 1376 1377 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1378 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1379 error = EIO; 1380 goto fail; 1381 } 1382 /* Clear HW ownership of EEPROM. */ 1383 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1384 1385 /* Read the hardware capabilities, revision and SKU type. */ 1386 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1387 sizeof(sc->cap))); 1388 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1389 sizeof(sc->rev))); 1390 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1391 sizeof(sc->type))); 1392 1393 sc->rev = le16toh(sc->rev); 1394 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1395 sc->rev, sc->type); 1396 1397 /* Read the regulatory domain (4 ASCII characters.) 
*/ 1398 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1399 sizeof(sc->domain))); 1400 1401 /* Read MAC address. */ 1402 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1403 IEEE80211_ADDR_LEN)); 1404 1405 /* Read the list of authorized channels. */ 1406 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1407 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1408 1409 /* Read the list of TX power groups. */ 1410 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1411 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1412 1413 fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1414 1415 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1416 __func__); 1417 1418 return error; 1419 #undef WPI_CHK 1420 } 1421 1422 /* 1423 * Translate EEPROM flags to net80211. 1424 */ 1425 static uint32_t 1426 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel) 1427 { 1428 uint32_t nflags; 1429 1430 nflags = 0; 1431 if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0) 1432 nflags |= IEEE80211_CHAN_PASSIVE; 1433 if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0) 1434 nflags |= IEEE80211_CHAN_NOADHOC; 1435 if (channel->flags & WPI_EEPROM_CHAN_RADAR) { 1436 nflags |= IEEE80211_CHAN_DFS; 1437 /* XXX apparently IBSS may still be marked */ 1438 nflags |= IEEE80211_CHAN_NOADHOC; 1439 } 1440 1441 /* XXX HOSTAP uses WPI_MODE_IBSS */ 1442 if (nflags & IEEE80211_CHAN_NOADHOC) 1443 nflags |= IEEE80211_CHAN_NOHOSTAP; 1444 1445 return nflags; 1446 } 1447 1448 static void 1449 wpi_read_eeprom_band(struct wpi_softc *sc, int n) 1450 { 1451 struct ifnet *ifp = sc->sc_ifp; 1452 struct ieee80211com *ic = ifp->if_l2com; 1453 struct wpi_eeprom_chan *channels = sc->eeprom_channels[n]; 1454 const struct wpi_chan_band *band = &wpi_bands[n]; 1455 struct ieee80211_channel *c; 1456 uint8_t chan; 1457 int i, nflags; 1458 1459 for (i = 0; i < band->nchan; i++) { 1460 if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { 1461 DPRINTF(sc, WPI_DEBUG_EEPROM, 1462 "Channel Not Valid: %d, band %d\n", 1463 band->chan[i],n); 1464 continue; 1465 } 1466 1467 chan = band->chan[i]; 1468 nflags = wpi_eeprom_channel_flags(&channels[i]); 1469 1470 c = &ic->ic_channels[ic->ic_nchans++]; 1471 c->ic_ieee = chan; 1472 c->ic_maxregpower = channels[i].maxpwr; 1473 c->ic_maxpower = 2*c->ic_maxregpower; 1474 1475 if (n == 0) { /* 2GHz band */ 1476 c->ic_freq = ieee80211_ieee2mhz(chan, 1477 IEEE80211_CHAN_G); 1478 1479 /* G =>'s B is supported */ 1480 c->ic_flags = IEEE80211_CHAN_B | nflags; 1481 c = &ic->ic_channels[ic->ic_nchans++]; 1482 c[0] = c[-1]; 1483 c->ic_flags = IEEE80211_CHAN_G | nflags; 1484 } else { /* 5GHz band */ 1485 c->ic_freq = ieee80211_ieee2mhz(chan, 1486 IEEE80211_CHAN_A); 1487 1488 c->ic_flags = IEEE80211_CHAN_A | nflags; 1489 } 1490 1491 /* Save maximum allowed TX power for this channel. */ 1492 sc->maxpwr[chan] = channels[i].maxpwr; 1493 1494 DPRINTF(sc, WPI_DEBUG_EEPROM, 1495 "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d," 1496 " offset %d\n", chan, c->ic_freq, 1497 channels[i].flags, sc->maxpwr[chan], 1498 IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans); 1499 } 1500 } 1501 1502 /** 1503 * Read the eeprom to find out what channels are valid for the given 1504 * band and update net80211 with what we find. 
1505 */ 1506 static int 1507 wpi_read_eeprom_channels(struct wpi_softc *sc, int n) 1508 { 1509 struct ifnet *ifp = sc->sc_ifp; 1510 struct ieee80211com *ic = ifp->if_l2com; 1511 const struct wpi_chan_band *band = &wpi_bands[n]; 1512 int error; 1513 1514 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1515 1516 error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n], 1517 band->nchan * sizeof (struct wpi_eeprom_chan)); 1518 if (error != 0) { 1519 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1520 return error; 1521 } 1522 1523 wpi_read_eeprom_band(sc, n); 1524 1525 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1526 1527 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1528 1529 return 0; 1530 } 1531 1532 static struct wpi_eeprom_chan * 1533 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c) 1534 { 1535 int i, j; 1536 1537 for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++) 1538 for (i = 0; i < wpi_bands[j].nchan; i++) 1539 if (wpi_bands[j].chan[i] == c->ic_ieee) 1540 return &sc->eeprom_channels[j][i]; 1541 1542 return NULL; 1543 } 1544 1545 /* 1546 * Enforce flags read from EEPROM. 1547 */ 1548 static int 1549 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1550 int nchan, struct ieee80211_channel chans[]) 1551 { 1552 struct ifnet *ifp = ic->ic_ifp; 1553 struct wpi_softc *sc = ifp->if_softc; 1554 int i; 1555 1556 for (i = 0; i < nchan; i++) { 1557 struct ieee80211_channel *c = &chans[i]; 1558 struct wpi_eeprom_chan *channel; 1559 1560 channel = wpi_find_eeprom_channel(sc, c); 1561 if (channel == NULL) { 1562 if_printf(ic->ic_ifp, 1563 "%s: invalid channel %u freq %u/0x%x\n", 1564 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1565 return EINVAL; 1566 } 1567 c->ic_flags |= wpi_eeprom_channel_flags(channel); 1568 } 1569 1570 return 0; 1571 } 1572 1573 static int 1574 wpi_read_eeprom_group(struct wpi_softc *sc, int n) 1575 { 1576 struct wpi_power_group *group = &sc->groups[n]; 1577 struct wpi_eeprom_group rgroup; 1578 int i, error; 1579 1580 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1581 1582 if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, 1583 &rgroup, sizeof rgroup)) != 0) { 1584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1585 return error; 1586 } 1587 1588 /* Save TX power group information. */ 1589 group->chan = rgroup.chan; 1590 group->maxpwr = rgroup.maxpwr; 1591 /* Retrieve temperature at which the samples were taken. 
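	   (presumably used as the reference point when the TX power
	   calibration code compensates for temperature drift, see
	   wpi_power_calibration()).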
*/ 1592 group->temp = (int16_t)le16toh(rgroup.temp); 1593 1594 DPRINTF(sc, WPI_DEBUG_EEPROM, 1595 "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, 1596 group->maxpwr, group->temp); 1597 1598 for (i = 0; i < WPI_SAMPLES_COUNT; i++) { 1599 group->samples[i].index = rgroup.samples[i].index; 1600 group->samples[i].power = rgroup.samples[i].power; 1601 1602 DPRINTF(sc, WPI_DEBUG_EEPROM, 1603 "\tsample %d: index=%d power=%d\n", i, 1604 group->samples[i].index, group->samples[i].power); 1605 } 1606 1607 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1608 1609 return 0; 1610 } 1611 1612 static int 1613 wpi_add_node_entry_adhoc(struct wpi_softc *sc) 1614 { 1615 int newid = WPI_ID_IBSS_MIN; 1616 1617 for (; newid <= WPI_ID_IBSS_MAX; newid++) { 1618 if ((sc->nodesmsk & (1 << newid)) == 0) { 1619 sc->nodesmsk |= 1 << newid; 1620 return newid; 1621 } 1622 } 1623 1624 return WPI_ID_UNDEFINED; 1625 } 1626 1627 static __inline int 1628 wpi_add_node_entry_sta(struct wpi_softc *sc) 1629 { 1630 sc->nodesmsk |= 1 << WPI_ID_BSS; 1631 1632 return WPI_ID_BSS; 1633 } 1634 1635 static __inline int 1636 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id) 1637 { 1638 if (id == WPI_ID_UNDEFINED) 1639 return 0; 1640 1641 return (sc->nodesmsk >> id) & 1; 1642 } 1643 1644 static __inline void 1645 wpi_clear_node_table(struct wpi_softc *sc) 1646 { 1647 sc->nodesmsk = 0; 1648 } 1649 1650 static __inline void 1651 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id) 1652 { 1653 sc->nodesmsk &= ~(1 << id); 1654 } 1655 1656 static struct ieee80211_node * 1657 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1658 { 1659 struct wpi_node *wn; 1660 1661 wn = malloc(sizeof (struct wpi_node), M_80211_NODE, 1662 M_NOWAIT | M_ZERO); 1663 1664 if (wn == NULL) 1665 return NULL; 1666 1667 wn->id = WPI_ID_UNDEFINED; 1668 1669 return &wn->ni; 1670 } 1671 1672 static void 1673 wpi_node_free(struct ieee80211_node *ni) 1674 { 1675 struct ieee80211com *ic = ni->ni_ic; 1676 struct wpi_softc *sc = ic->ic_ifp->if_softc; 1677 struct wpi_node *wn = WPI_NODE(ni); 1678 1679 if (wn->id != WPI_ID_UNDEFINED) { 1680 WPI_NT_LOCK(sc); 1681 if (wpi_check_node_entry(sc, wn->id)) { 1682 wpi_del_node_entry(sc, wn->id); 1683 wpi_del_node(sc, ni); 1684 } 1685 WPI_NT_UNLOCK(sc); 1686 } 1687 1688 sc->sc_node_free(ni); 1689 } 1690 1691 static __inline int 1692 wpi_check_bss_filter(struct wpi_softc *sc) 1693 { 1694 return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0; 1695 } 1696 1697 static void 1698 wpi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, 1699 const struct ieee80211_rx_stats *rxs, 1700 int rssi, int nf) 1701 { 1702 struct ieee80211vap *vap = ni->ni_vap; 1703 struct wpi_softc *sc = vap->iv_ic->ic_ifp->if_softc; 1704 struct wpi_vap *wvp = WPI_VAP(vap); 1705 uint64_t ni_tstamp, rx_tstamp; 1706 1707 wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf); 1708 1709 if (vap->iv_opmode == IEEE80211_M_IBSS && 1710 vap->iv_state == IEEE80211_S_RUN && 1711 (subtype == IEEE80211_FC0_SUBTYPE_BEACON || 1712 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { 1713 ni_tstamp = le64toh(ni->ni_tstamp.tsf); 1714 rx_tstamp = le64toh(sc->rx_tstamp); 1715 1716 if (ni_tstamp >= rx_tstamp) { 1717 DPRINTF(sc, WPI_DEBUG_STATE, 1718 "ibss merge, tsf %ju tstamp %ju\n", 1719 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp); 1720 (void) ieee80211_ibss_merge(ni); 1721 } 1722 } 1723 } 1724 1725 static void 1726 wpi_restore_node(void *arg, struct ieee80211_node *ni) 1727 { 1728 struct wpi_softc *sc = arg; 1729 struct 
wpi_node *wn = WPI_NODE(ni); 1730 int error; 1731 1732 WPI_NT_LOCK(sc); 1733 if (wn->id != WPI_ID_UNDEFINED) { 1734 wn->id = WPI_ID_UNDEFINED; 1735 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 1736 device_printf(sc->sc_dev, 1737 "%s: could not add IBSS node, error %d\n", 1738 __func__, error); 1739 } 1740 } 1741 WPI_NT_UNLOCK(sc); 1742 } 1743 1744 static void 1745 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp) 1746 { 1747 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1748 1749 /* Set group keys once. */ 1750 WPI_NT_LOCK(sc); 1751 wvp->wv_gtk = 0; 1752 WPI_NT_UNLOCK(sc); 1753 1754 ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc); 1755 ieee80211_crypto_reload_keys(ic); 1756 } 1757 1758 /** 1759 * Called by net80211 when ever there is a change to 80211 state machine 1760 */ 1761 static int 1762 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1763 { 1764 struct wpi_vap *wvp = WPI_VAP(vap); 1765 struct ieee80211com *ic = vap->iv_ic; 1766 struct ifnet *ifp = ic->ic_ifp; 1767 struct wpi_softc *sc = ifp->if_softc; 1768 int error = 0; 1769 1770 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1771 1772 DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1773 ieee80211_state_name[vap->iv_state], 1774 ieee80211_state_name[nstate]); 1775 1776 if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) { 1777 if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) { 1778 device_printf(sc->sc_dev, 1779 "%s: could not set power saving level\n", 1780 __func__); 1781 return error; 1782 } 1783 1784 wpi_set_led(sc, WPI_LED_LINK, 1, 0); 1785 } 1786 1787 switch (nstate) { 1788 case IEEE80211_S_SCAN: 1789 WPI_RXON_LOCK(sc); 1790 if (wpi_check_bss_filter(sc) != 0) { 1791 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1792 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1793 device_printf(sc->sc_dev, 1794 "%s: could not send RXON\n", __func__); 1795 } 1796 } 1797 WPI_RXON_UNLOCK(sc); 1798 break; 1799 1800 case IEEE80211_S_ASSOC: 1801 if (vap->iv_state != IEEE80211_S_RUN) 1802 break; 1803 /* FALLTHROUGH */ 1804 case IEEE80211_S_AUTH: 1805 /* 1806 * NB: do not optimize AUTH -> AUTH state transmission - 1807 * this will break powersave with non-QoS AP! 1808 */ 1809 1810 /* 1811 * The node must be registered in the firmware before auth. 1812 * Also the associd must be cleared on RUN -> ASSOC 1813 * transitions. 1814 */ 1815 if ((error = wpi_auth(sc, vap)) != 0) { 1816 device_printf(sc->sc_dev, 1817 "%s: could not move to AUTH state, error %d\n", 1818 __func__, error); 1819 } 1820 break; 1821 1822 case IEEE80211_S_RUN: 1823 /* 1824 * RUN -> RUN transition: 1825 * STA mode: Just restart the timers. 1826 * IBSS mode: Process IBSS merge. 1827 */ 1828 if (vap->iv_state == IEEE80211_S_RUN) { 1829 if (vap->iv_opmode != IEEE80211_M_IBSS) { 1830 WPI_RXON_LOCK(sc); 1831 wpi_calib_timeout(sc); 1832 WPI_RXON_UNLOCK(sc); 1833 break; 1834 } else { 1835 /* 1836 * Drop the BSS_FILTER bit 1837 * (there is no another way to change bssid). 1838 */ 1839 WPI_RXON_LOCK(sc); 1840 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 1841 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 1842 device_printf(sc->sc_dev, 1843 "%s: could not send RXON\n", 1844 __func__); 1845 } 1846 WPI_RXON_UNLOCK(sc); 1847 1848 /* Restore all what was lost. */ 1849 wpi_restore_node_table(sc, wvp); 1850 1851 /* XXX set conditionally? */ 1852 wpi_updateedca(ic); 1853 } 1854 } 1855 1856 /* 1857 * !RUN -> RUN requires setting the association id 1858 * which is done with a firmware cmd. 
We also defer 1859 * starting the timers until that work is done. 1860 */ 1861 if ((error = wpi_run(sc, vap)) != 0) { 1862 device_printf(sc->sc_dev, 1863 "%s: could not move to RUN state\n", __func__); 1864 } 1865 break; 1866 1867 default: 1868 break; 1869 } 1870 if (error != 0) { 1871 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1872 return error; 1873 } 1874 1875 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1876 1877 return wvp->wv_newstate(vap, nstate, arg); 1878 } 1879 1880 static void 1881 wpi_calib_timeout(void *arg) 1882 { 1883 struct wpi_softc *sc = arg; 1884 1885 if (wpi_check_bss_filter(sc) == 0) 1886 return; 1887 1888 wpi_power_calibration(sc); 1889 1890 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 1891 } 1892 1893 static __inline uint8_t 1894 rate2plcp(const uint8_t rate) 1895 { 1896 switch (rate) { 1897 case 12: return 0xd; 1898 case 18: return 0xf; 1899 case 24: return 0x5; 1900 case 36: return 0x7; 1901 case 48: return 0x9; 1902 case 72: return 0xb; 1903 case 96: return 0x1; 1904 case 108: return 0x3; 1905 case 2: return 10; 1906 case 4: return 20; 1907 case 11: return 55; 1908 case 22: return 110; 1909 default: return 0; 1910 } 1911 } 1912 1913 static __inline uint8_t 1914 plcp2rate(const uint8_t plcp) 1915 { 1916 switch (plcp) { 1917 case 0xd: return 12; 1918 case 0xf: return 18; 1919 case 0x5: return 24; 1920 case 0x7: return 36; 1921 case 0x9: return 48; 1922 case 0xb: return 72; 1923 case 0x1: return 96; 1924 case 0x3: return 108; 1925 case 10: return 2; 1926 case 20: return 4; 1927 case 55: return 11; 1928 case 110: return 22; 1929 default: return 0; 1930 } 1931 } 1932 1933 /* Quickly determine if a given rate is CCK or OFDM. */ 1934 #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 1935 1936 static void 1937 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc, 1938 struct wpi_rx_data *data) 1939 { 1940 struct ifnet *ifp = sc->sc_ifp; 1941 struct ieee80211com *ic = ifp->if_l2com; 1942 struct wpi_rx_ring *ring = &sc->rxq; 1943 struct wpi_rx_stat *stat; 1944 struct wpi_rx_head *head; 1945 struct wpi_rx_tail *tail; 1946 struct ieee80211_frame *wh; 1947 struct ieee80211_node *ni; 1948 struct mbuf *m, *m1; 1949 bus_addr_t paddr; 1950 uint32_t flags; 1951 uint16_t len; 1952 int error; 1953 1954 stat = (struct wpi_rx_stat *)(desc + 1); 1955 1956 if (stat->len > WPI_STAT_MAXLEN) { 1957 device_printf(sc->sc_dev, "invalid RX statistic header\n"); 1958 goto fail1; 1959 } 1960 1961 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 1962 head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); 1963 len = le16toh(head->len); 1964 tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len); 1965 flags = le32toh(tail->flags); 1966 1967 DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d" 1968 " rate %x chan %d tstamp %ju\n", __func__, ring->cur, 1969 le32toh(desc->len), len, (int8_t)stat->rssi, 1970 head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp)); 1971 1972 /* Discard frames with a bad FCS early. */ 1973 if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) { 1974 DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n", 1975 __func__, flags); 1976 goto fail1; 1977 } 1978 /* Discard frames that are too short. 
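 * The lower bound, sizeof(struct ieee80211_frame_ack) (2-byte frame
 * control + 2-byte duration + 6-byte RA = 10 bytes), is the smallest
 * 802.11 header that can be meaningfully processed.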
*/ 1979 if (len < sizeof (struct ieee80211_frame_ack)) { 1980 DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n", 1981 __func__, len); 1982 goto fail1; 1983 } 1984 1985 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1986 if (m1 == NULL) { 1987 DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1988 __func__); 1989 goto fail1; 1990 } 1991 bus_dmamap_unload(ring->data_dmat, data->map); 1992 1993 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 1994 MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1995 if (error != 0 && error != EFBIG) { 1996 device_printf(sc->sc_dev, 1997 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1998 m_freem(m1); 1999 2000 /* Try to reload the old mbuf. */ 2001 error = bus_dmamap_load(ring->data_dmat, data->map, 2002 mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr, 2003 &paddr, BUS_DMA_NOWAIT); 2004 if (error != 0 && error != EFBIG) { 2005 panic("%s: could not load old RX mbuf", __func__); 2006 } 2007 /* Physical address may have changed. */ 2008 ring->desc[ring->cur] = htole32(paddr); 2009 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2010 BUS_DMASYNC_PREWRITE); 2011 goto fail1; 2012 } 2013 2014 m = data->m; 2015 data->m = m1; 2016 /* Update RX descriptor. */ 2017 ring->desc[ring->cur] = htole32(paddr); 2018 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2019 BUS_DMASYNC_PREWRITE); 2020 2021 /* Finalize mbuf. */ 2022 m->m_pkthdr.rcvif = ifp; 2023 m->m_data = (caddr_t)(head + 1); 2024 m->m_pkthdr.len = m->m_len = len; 2025 2026 /* Grab a reference to the source node. */ 2027 wh = mtod(m, struct ieee80211_frame *); 2028 2029 if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2030 (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) { 2031 /* Check whether decryption was successful or not. */ 2032 if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) { 2033 DPRINTF(sc, WPI_DEBUG_RECV, 2034 "CCMP decryption failed 0x%x\n", flags); 2035 goto fail2; 2036 } 2037 m->m_flags |= M_WEP; 2038 } 2039 2040 if (len >= sizeof(struct ieee80211_frame_min)) 2041 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2042 else 2043 ni = NULL; 2044 2045 sc->rx_tstamp = tail->tstamp; 2046 2047 if (ieee80211_radiotap_active(ic)) { 2048 struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; 2049 2050 tap->wr_flags = 0; 2051 if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE)) 2052 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2053 tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET); 2054 tap->wr_dbm_antnoise = WPI_RSSI_OFFSET; 2055 tap->wr_tsft = tail->tstamp; 2056 tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; 2057 tap->wr_rate = plcp2rate(head->plcp); 2058 } 2059 2060 WPI_UNLOCK(sc); 2061 2062 /* Send the frame to the 802.11 layer. */ 2063 if (ni != NULL) { 2064 (void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET); 2065 /* Node is no longer needed. 
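 * ieee80211_find_rxnode() returned a referenced node and
 * ieee80211_input() consumes only the mbuf, not the node reference,
 * so the reference is dropped here.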
*/ 2066 ieee80211_free_node(ni); 2067 } else 2068 (void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET); 2069 2070 WPI_LOCK(sc); 2071 2072 return; 2073 2074 fail2: m_freem(m); 2075 2076 fail1: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2077 } 2078 2079 static void 2080 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc, 2081 struct wpi_rx_data *data) 2082 { 2083 /* Ignore */ 2084 } 2085 2086 static void 2087 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2088 { 2089 struct ifnet *ifp = sc->sc_ifp; 2090 struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; 2091 struct wpi_tx_data *data = &ring->data[desc->idx]; 2092 struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); 2093 struct mbuf *m; 2094 struct ieee80211_node *ni; 2095 struct ieee80211vap *vap; 2096 struct ieee80211com *ic; 2097 uint32_t status = le32toh(stat->status); 2098 int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT; 2099 2100 KASSERT(data->ni != NULL, ("no node")); 2101 KASSERT(data->m != NULL, ("no mbuf")); 2102 2103 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2104 2105 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: " 2106 "qid %d idx %d retries %d btkillcnt %d rate %x duration %d " 2107 "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt, 2108 stat->btkillcnt, stat->rate, le32toh(stat->duration), status); 2109 2110 /* Unmap and free mbuf. */ 2111 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2112 bus_dmamap_unload(ring->data_dmat, data->map); 2113 m = data->m, data->m = NULL; 2114 ni = data->ni, data->ni = NULL; 2115 vap = ni->ni_vap; 2116 ic = vap->iv_ic; 2117 2118 /* 2119 * Update rate control statistics for the node. 2120 */ 2121 if (status & WPI_TX_STATUS_FAIL) { 2122 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2123 ieee80211_ratectl_tx_complete(vap, ni, 2124 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2125 } else { 2126 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2127 ieee80211_ratectl_tx_complete(vap, ni, 2128 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2129 } 2130 2131 ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0); 2132 2133 WPI_TXQ_STATE_LOCK(sc); 2134 ring->queued -= 1; 2135 if (ring->queued > 0) { 2136 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2137 2138 if (sc->qfullmsk != 0 && 2139 ring->queued < WPI_TX_RING_LOMARK) { 2140 sc->qfullmsk &= ~(1 << ring->qid); 2141 IF_LOCK(&ifp->if_snd); 2142 if (sc->qfullmsk == 0 && 2143 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2144 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2145 IF_UNLOCK(&ifp->if_snd); 2146 ieee80211_runtask(ic, &sc->sc_start_task); 2147 } else 2148 IF_UNLOCK(&ifp->if_snd); 2149 } 2150 } else 2151 callout_stop(&sc->tx_timeout); 2152 WPI_TXQ_STATE_UNLOCK(sc); 2153 2154 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2155 } 2156 2157 /* 2158 * Process a "command done" firmware notification. This is where we wakeup 2159 * processes waiting for a synchronous command completion. 2160 */ 2161 static void 2162 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc) 2163 { 2164 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 2165 struct wpi_tx_data *data; 2166 2167 DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x " 2168 "type %s len %d\n", desc->qid, desc->idx, 2169 desc->flags, wpi_cmd_str(desc->type), 2170 le32toh(desc->len)); 2171 2172 if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM) 2173 return; /* Not a command ack. 
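 * Data-queue completions use the same notification header but are
 * handled by wpi_tx_done(); only descriptors from WPI_CMD_QUEUE_NUM
 * wake a wpi_cmd() caller sleeping on the command slot.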
*/ 2174 2175 KASSERT(ring->queued == 0, ("ring->queued must be 0")); 2176 2177 data = &ring->data[desc->idx]; 2178 2179 /* If the command was mapped in an mbuf, free it. */ 2180 if (data->m != NULL) { 2181 bus_dmamap_sync(ring->data_dmat, data->map, 2182 BUS_DMASYNC_POSTWRITE); 2183 bus_dmamap_unload(ring->data_dmat, data->map); 2184 m_freem(data->m); 2185 data->m = NULL; 2186 } 2187 2188 wakeup(&ring->cmd[desc->idx]); 2189 2190 if (desc->type == WPI_CMD_SET_POWER_MODE) { 2191 WPI_TXQ_LOCK(sc); 2192 if (sc->sc_flags & WPI_PS_PATH) { 2193 sc->sc_update_rx_ring = wpi_update_rx_ring_ps; 2194 sc->sc_update_tx_ring = wpi_update_tx_ring_ps; 2195 } else { 2196 sc->sc_update_rx_ring = wpi_update_rx_ring; 2197 sc->sc_update_tx_ring = wpi_update_tx_ring; 2198 } 2199 WPI_TXQ_UNLOCK(sc); 2200 } 2201 } 2202 2203 static void 2204 wpi_notif_intr(struct wpi_softc *sc) 2205 { 2206 struct ifnet *ifp = sc->sc_ifp; 2207 struct ieee80211com *ic = ifp->if_l2com; 2208 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2209 uint32_t hw; 2210 2211 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 2212 BUS_DMASYNC_POSTREAD); 2213 2214 hw = le32toh(sc->shared->next) & 0xfff; 2215 hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; 2216 2217 while (sc->rxq.cur != hw) { 2218 sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; 2219 2220 struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2221 struct wpi_rx_desc *desc; 2222 2223 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2224 BUS_DMASYNC_POSTREAD); 2225 desc = mtod(data->m, struct wpi_rx_desc *); 2226 2227 DPRINTF(sc, WPI_DEBUG_NOTIFY, 2228 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 2229 __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags, 2230 desc->type, wpi_cmd_str(desc->type), le32toh(desc->len)); 2231 2232 if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) { 2233 /* Reply to a command. */ 2234 wpi_cmd_done(sc, desc); 2235 } 2236 2237 switch (desc->type) { 2238 case WPI_RX_DONE: 2239 /* An 802.11 frame has been received. */ 2240 wpi_rx_done(sc, desc, data); 2241 2242 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2243 /* wpi_stop() was called. */ 2244 return; 2245 } 2246 2247 break; 2248 2249 case WPI_TX_DONE: 2250 /* An 802.11 frame has been transmitted. 
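 * wpi_tx_done() reclaims the mbuf and node reference, feeds the
 * retry count to the rate control module and clears IFF_DRV_OACTIVE
 * once the ring drains below WPI_TX_RING_LOMARK.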
*/ 2251 wpi_tx_done(sc, desc); 2252 break; 2253 2254 case WPI_RX_STATISTICS: 2255 case WPI_BEACON_STATISTICS: 2256 wpi_rx_statistics(sc, desc, data); 2257 break; 2258 2259 case WPI_BEACON_MISSED: 2260 { 2261 struct wpi_beacon_missed *miss = 2262 (struct wpi_beacon_missed *)(desc + 1); 2263 uint32_t expected, misses, received, threshold; 2264 2265 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2266 BUS_DMASYNC_POSTREAD); 2267 2268 misses = le32toh(miss->consecutive); 2269 expected = le32toh(miss->expected); 2270 received = le32toh(miss->received); 2271 threshold = MAX(2, vap->iv_bmissthreshold); 2272 2273 DPRINTF(sc, WPI_DEBUG_BMISS, 2274 "%s: beacons missed %u(%u) (received %u/%u)\n", 2275 __func__, misses, le32toh(miss->total), received, 2276 expected); 2277 2278 if (misses >= threshold || 2279 (received == 0 && expected >= threshold)) { 2280 WPI_RXON_LOCK(sc); 2281 if (callout_pending(&sc->scan_timeout)) { 2282 wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL, 2283 0, 1); 2284 } 2285 WPI_RXON_UNLOCK(sc); 2286 if (vap->iv_state == IEEE80211_S_RUN && 2287 (ic->ic_flags & IEEE80211_F_SCAN) == 0) 2288 ieee80211_beacon_miss(ic); 2289 } 2290 2291 break; 2292 } 2293 #ifdef WPI_DEBUG 2294 case WPI_BEACON_SENT: 2295 { 2296 struct wpi_tx_stat *stat = 2297 (struct wpi_tx_stat *)(desc + 1); 2298 uint64_t *tsf = (uint64_t *)(stat + 1); 2299 uint32_t *mode = (uint32_t *)(tsf + 1); 2300 2301 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2302 BUS_DMASYNC_POSTREAD); 2303 2304 DPRINTF(sc, WPI_DEBUG_BEACON, 2305 "beacon sent: rts %u, ack %u, btkill %u, rate %u, " 2306 "duration %u, status %x, tsf %ju, mode %x\n", 2307 stat->rtsfailcnt, stat->ackfailcnt, 2308 stat->btkillcnt, stat->rate, le32toh(stat->duration), 2309 le32toh(stat->status), *tsf, *mode); 2310 2311 break; 2312 } 2313 #endif 2314 case WPI_UC_READY: 2315 { 2316 struct wpi_ucode_info *uc = 2317 (struct wpi_ucode_info *)(desc + 1); 2318 2319 /* The microcontroller is ready. */ 2320 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2321 BUS_DMASYNC_POSTREAD); 2322 DPRINTF(sc, WPI_DEBUG_RESET, 2323 "microcode alive notification version=%d.%d " 2324 "subtype=%x alive=%x\n", uc->major, uc->minor, 2325 uc->subtype, le32toh(uc->valid)); 2326 2327 if (le32toh(uc->valid) != 1) { 2328 device_printf(sc->sc_dev, 2329 "microcontroller initialization failed\n"); 2330 wpi_stop_locked(sc); 2331 } 2332 /* Save the address of the error log in SRAM. 
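 * wpi_fatal_intr() reads the log back through this pointer if the
 * firmware later signals WPI_INT_SW_ERR or WPI_INT_HW_ERR.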
*/ 2333 sc->errptr = le32toh(uc->errptr); 2334 break; 2335 } 2336 case WPI_STATE_CHANGED: 2337 { 2338 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2339 BUS_DMASYNC_POSTREAD); 2340 2341 uint32_t *status = (uint32_t *)(desc + 1); 2342 2343 DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n", 2344 le32toh(*status)); 2345 2346 if (le32toh(*status) & 1) { 2347 WPI_NT_LOCK(sc); 2348 wpi_clear_node_table(sc); 2349 WPI_NT_UNLOCK(sc); 2350 taskqueue_enqueue(sc->sc_tq, 2351 &sc->sc_radiooff_task); 2352 return; 2353 } 2354 break; 2355 } 2356 #ifdef WPI_DEBUG 2357 case WPI_START_SCAN: 2358 { 2359 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2360 BUS_DMASYNC_POSTREAD); 2361 2362 struct wpi_start_scan *scan = 2363 (struct wpi_start_scan *)(desc + 1); 2364 DPRINTF(sc, WPI_DEBUG_SCAN, 2365 "%s: scanning channel %d status %x\n", 2366 __func__, scan->chan, le32toh(scan->status)); 2367 2368 break; 2369 } 2370 #endif 2371 case WPI_STOP_SCAN: 2372 { 2373 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2374 BUS_DMASYNC_POSTREAD); 2375 2376 struct wpi_stop_scan *scan = 2377 (struct wpi_stop_scan *)(desc + 1); 2378 2379 DPRINTF(sc, WPI_DEBUG_SCAN, 2380 "scan finished nchan=%d status=%d chan=%d\n", 2381 scan->nchan, scan->status, scan->chan); 2382 2383 WPI_RXON_LOCK(sc); 2384 callout_stop(&sc->scan_timeout); 2385 WPI_RXON_UNLOCK(sc); 2386 if (scan->status == WPI_SCAN_ABORTED) 2387 ieee80211_cancel_scan(vap); 2388 else 2389 ieee80211_scan_next(vap); 2390 break; 2391 } 2392 } 2393 2394 if (sc->rxq.cur % 8 == 0) { 2395 /* Tell the firmware what we have processed. */ 2396 sc->sc_update_rx_ring(sc); 2397 } 2398 } 2399 } 2400 2401 /* 2402 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2403 * from power-down sleep mode. 2404 */ 2405 static void 2406 wpi_wakeup_intr(struct wpi_softc *sc) 2407 { 2408 int qid; 2409 2410 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 2411 "%s: ucode wakeup from power-down sleep\n", __func__); 2412 2413 /* Wakeup RX and TX rings. 
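 * Flush any read/write pointer updates that the power-save update
 * paths (wpi_update_rx_ring_ps and the TX equivalent) had to defer
 * while the microcontroller was asleep.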
*/ 2414 if (sc->rxq.update) { 2415 sc->rxq.update = 0; 2416 wpi_update_rx_ring(sc); 2417 } 2418 WPI_TXQ_LOCK(sc); 2419 for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) { 2420 struct wpi_tx_ring *ring = &sc->txq[qid]; 2421 2422 if (ring->update) { 2423 ring->update = 0; 2424 wpi_update_tx_ring(sc, ring); 2425 } 2426 } 2427 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 2428 WPI_TXQ_UNLOCK(sc); 2429 } 2430 2431 /* 2432 * This function prints firmware registers 2433 */ 2434 #ifdef WPI_DEBUG 2435 static void 2436 wpi_debug_registers(struct wpi_softc *sc) 2437 { 2438 size_t i; 2439 static const uint32_t csr_tbl[] = { 2440 WPI_HW_IF_CONFIG, 2441 WPI_INT, 2442 WPI_INT_MASK, 2443 WPI_FH_INT, 2444 WPI_GPIO_IN, 2445 WPI_RESET, 2446 WPI_GP_CNTRL, 2447 WPI_EEPROM, 2448 WPI_EEPROM_GP, 2449 WPI_GIO, 2450 WPI_UCODE_GP1, 2451 WPI_UCODE_GP2, 2452 WPI_GIO_CHICKEN, 2453 WPI_ANA_PLL, 2454 WPI_DBG_HPET_MEM, 2455 }; 2456 static const uint32_t prph_tbl[] = { 2457 WPI_APMG_CLK_CTRL, 2458 WPI_APMG_PS, 2459 WPI_APMG_PCI_STT, 2460 WPI_APMG_RFKILL, 2461 }; 2462 2463 DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n"); 2464 2465 for (i = 0; i < nitems(csr_tbl); i++) { 2466 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2467 wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i])); 2468 2469 if ((i + 1) % 2 == 0) 2470 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2471 } 2472 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n"); 2473 2474 if (wpi_nic_lock(sc) == 0) { 2475 for (i = 0; i < nitems(prph_tbl); i++) { 2476 DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ", 2477 wpi_get_prph_string(prph_tbl[i]), 2478 wpi_prph_read(sc, prph_tbl[i])); 2479 2480 if ((i + 1) % 2 == 0) 2481 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2482 } 2483 DPRINTF(sc, WPI_DEBUG_REGISTER, "\n"); 2484 wpi_nic_unlock(sc); 2485 } else { 2486 DPRINTF(sc, WPI_DEBUG_REGISTER, 2487 "Cannot access internal registers.\n"); 2488 } 2489 } 2490 #endif 2491 2492 /* 2493 * Dump the error log of the firmware when a firmware panic occurs. Although 2494 * we can't debug the firmware because it is neither open source nor free, it 2495 * can help us to identify certain classes of problems. 2496 */ 2497 static void 2498 wpi_fatal_intr(struct wpi_softc *sc) 2499 { 2500 struct wpi_fw_dump dump; 2501 uint32_t i, offset, count; 2502 2503 /* Check that the error log address is valid. */ 2504 if (sc->errptr < WPI_FW_DATA_BASE || 2505 sc->errptr + sizeof (dump) > 2506 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2507 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2508 sc->errptr); 2509 return; 2510 } 2511 if (wpi_nic_lock(sc) != 0) { 2512 printf("%s: could not read firmware error log\n", __func__); 2513 return; 2514 } 2515 /* Read number of entries in the log. */ 2516 count = wpi_mem_read(sc, sc->errptr); 2517 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2518 printf("%s: invalid count field (count = %u)\n", __func__, 2519 count); 2520 wpi_nic_unlock(sc); 2521 return; 2522 } 2523 /* Skip "count" field. */ 2524 offset = sc->errptr + sizeof (uint32_t); 2525 printf("firmware error log (count = %u):\n", count); 2526 for (i = 0; i < count; i++) { 2527 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2528 sizeof (dump) / sizeof (uint32_t)); 2529 2530 printf(" error type = \"%s\" (0x%08X)\n", 2531 (dump.desc < nitems(wpi_fw_errmsg)) ? 
2532 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2533 dump.desc); 2534 printf(" error data = 0x%08X\n", 2535 dump.data); 2536 printf(" branch link = 0x%08X%08X\n", 2537 dump.blink[0], dump.blink[1]); 2538 printf(" interrupt link = 0x%08X%08X\n", 2539 dump.ilink[0], dump.ilink[1]); 2540 printf(" time = %u\n", dump.time); 2541 2542 offset += sizeof (dump); 2543 } 2544 wpi_nic_unlock(sc); 2545 /* Dump driver status (TX and RX rings) while we're here. */ 2546 printf("driver status:\n"); 2547 WPI_TXQ_LOCK(sc); 2548 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2549 struct wpi_tx_ring *ring = &sc->txq[i]; 2550 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2551 i, ring->qid, ring->cur, ring->queued); 2552 } 2553 WPI_TXQ_UNLOCK(sc); 2554 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2555 } 2556 2557 static void 2558 wpi_intr(void *arg) 2559 { 2560 struct wpi_softc *sc = arg; 2561 struct ifnet *ifp = sc->sc_ifp; 2562 uint32_t r1, r2; 2563 2564 WPI_LOCK(sc); 2565 2566 /* Disable interrupts. */ 2567 WPI_WRITE(sc, WPI_INT_MASK, 0); 2568 2569 r1 = WPI_READ(sc, WPI_INT); 2570 2571 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2572 goto end; /* Hardware gone! */ 2573 2574 r2 = WPI_READ(sc, WPI_FH_INT); 2575 2576 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2577 r1, r2); 2578 2579 if (r1 == 0 && r2 == 0) 2580 goto done; /* Interrupt not for us. */ 2581 2582 /* Acknowledge interrupts. */ 2583 WPI_WRITE(sc, WPI_INT, r1); 2584 WPI_WRITE(sc, WPI_FH_INT, r2); 2585 2586 if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) { 2587 device_printf(sc->sc_dev, "fatal firmware error\n"); 2588 #ifdef WPI_DEBUG 2589 wpi_debug_registers(sc); 2590 #endif 2591 wpi_fatal_intr(sc); 2592 DPRINTF(sc, WPI_DEBUG_HW, 2593 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2594 "(Hardware Error)"); 2595 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2596 goto end; 2597 } 2598 2599 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2600 (r2 & WPI_FH_INT_RX)) 2601 wpi_notif_intr(sc); 2602 2603 if (r1 & WPI_INT_ALIVE) 2604 wakeup(sc); /* Firmware is alive. */ 2605 2606 if (r1 & WPI_INT_WAKEUP) 2607 wpi_wakeup_intr(sc); 2608 2609 done: 2610 /* Re-enable interrupts. */ 2611 if (ifp->if_flags & IFF_UP) 2612 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2613 2614 end: WPI_UNLOCK(sc); 2615 } 2616 2617 static int 2618 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2619 { 2620 struct ifnet *ifp = sc->sc_ifp; 2621 struct ieee80211_frame *wh; 2622 struct wpi_tx_cmd *cmd; 2623 struct wpi_tx_data *data; 2624 struct wpi_tx_desc *desc; 2625 struct wpi_tx_ring *ring; 2626 struct mbuf *m1; 2627 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2628 int error, i, hdrlen, nsegs, totlen, pad; 2629 2630 WPI_TXQ_LOCK(sc); 2631 2632 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2633 2634 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2635 2636 if (sc->txq_active == 0) { 2637 /* wpi_stop() was called */ 2638 error = ENETDOWN; 2639 goto fail; 2640 } 2641 2642 wh = mtod(buf->m, struct ieee80211_frame *); 2643 hdrlen = ieee80211_anyhdrsize(wh); 2644 totlen = buf->m->m_pkthdr.len; 2645 2646 if (hdrlen & 3) { 2647 /* First segment length must be a multiple of 4. */ 2648 pad = 4 - (hdrlen & 3); 2649 } else 2650 pad = 0; 2651 2652 ring = &sc->txq[buf->ac]; 2653 desc = &ring->desc[ring->cur]; 2654 data = &ring->data[ring->cur]; 2655 2656 /* Prepare TX firmware command. 
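 * Each TX slot pairs a wpi_tx_cmd from the ring's command array with
 * up to WPI_MAX_SCATTER - 1 payload segments.  Descriptor segment 0
 * (filled below) covers the 4-byte command header, the command body
 * and the saved 802.11 header plus pad, so that the payload segments
 * start 4-byte aligned; e.g. a 26-byte QoS header gives
 * pad = 4 - (26 & 3) = 2.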
*/ 2657 cmd = &ring->cmd[ring->cur]; 2658 cmd->code = buf->code; 2659 cmd->flags = 0; 2660 cmd->qid = ring->qid; 2661 cmd->idx = ring->cur; 2662 2663 memcpy(cmd->data, buf->data, buf->size); 2664 2665 /* Save and trim IEEE802.11 header. */ 2666 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2667 m_adj(buf->m, hdrlen); 2668 2669 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2670 segs, &nsegs, BUS_DMA_NOWAIT); 2671 if (error != 0 && error != EFBIG) { 2672 device_printf(sc->sc_dev, 2673 "%s: can't map mbuf (error %d)\n", __func__, error); 2674 goto fail; 2675 } 2676 if (error != 0) { 2677 /* Too many DMA segments, linearize mbuf. */ 2678 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2679 if (m1 == NULL) { 2680 device_printf(sc->sc_dev, 2681 "%s: could not defrag mbuf\n", __func__); 2682 error = ENOBUFS; 2683 goto fail; 2684 } 2685 buf->m = m1; 2686 2687 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2688 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2689 if (error != 0) { 2690 device_printf(sc->sc_dev, 2691 "%s: can't map mbuf (error %d)\n", __func__, 2692 error); 2693 goto fail; 2694 } 2695 } 2696 2697 KASSERT(nsegs < WPI_MAX_SCATTER, 2698 ("too many DMA segments, nsegs (%d) should be less than %d", 2699 nsegs, WPI_MAX_SCATTER)); 2700 2701 data->m = buf->m; 2702 data->ni = buf->ni; 2703 2704 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2705 __func__, ring->qid, ring->cur, totlen, nsegs); 2706 2707 /* Fill TX descriptor. */ 2708 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2709 /* First DMA segment is used by the TX command. */ 2710 desc->segs[0].addr = htole32(data->cmd_paddr); 2711 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2712 /* Other DMA segments are for data payload. */ 2713 seg = &segs[0]; 2714 for (i = 1; i <= nsegs; i++) { 2715 desc->segs[i].addr = htole32(seg->ds_addr); 2716 desc->segs[i].len = htole32(seg->ds_len); 2717 seg++; 2718 } 2719 2720 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2721 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2722 BUS_DMASYNC_PREWRITE); 2723 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2724 BUS_DMASYNC_PREWRITE); 2725 2726 /* Kick TX ring. */ 2727 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2728 sc->sc_update_tx_ring(sc, ring); 2729 2730 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2731 /* Mark TX ring as full if we reach a certain threshold. */ 2732 WPI_TXQ_STATE_LOCK(sc); 2733 if (++ring->queued > WPI_TX_RING_HIMARK) { 2734 sc->qfullmsk |= 1 << ring->qid; 2735 2736 IF_LOCK(&ifp->if_snd); 2737 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2738 IF_UNLOCK(&ifp->if_snd); 2739 } 2740 2741 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2742 WPI_TXQ_STATE_UNLOCK(sc); 2743 } 2744 2745 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2746 2747 WPI_TXQ_UNLOCK(sc); 2748 2749 return 0; 2750 2751 fail: m_freem(buf->m); 2752 2753 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2754 2755 WPI_TXQ_UNLOCK(sc); 2756 2757 return error; 2758 } 2759 2760 /* 2761 * Construct the data packet for a transmit buffer. 
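 * In outline: pick the EDCA access category and a TX rate (fixed
 * management/multicast rates or the rate control module), run crypto
 * encapsulation if the frame is protected, then fill a struct
 * wpi_cmd_data and hand it to wpi_cmd2() for DMA mapping and queueing.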
2762 */ 2763 static int 2764 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2765 { 2766 const struct ieee80211_txparam *tp; 2767 struct ieee80211vap *vap = ni->ni_vap; 2768 struct ieee80211com *ic = ni->ni_ic; 2769 struct wpi_node *wn = WPI_NODE(ni); 2770 struct ieee80211_channel *chan; 2771 struct ieee80211_frame *wh; 2772 struct ieee80211_key *k = NULL; 2773 struct wpi_buf tx_data; 2774 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2775 uint32_t flags; 2776 uint16_t qos; 2777 uint8_t tid, type; 2778 int ac, error, swcrypt, rate, ismcast, totlen; 2779 2780 wh = mtod(m, struct ieee80211_frame *); 2781 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2782 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2783 2784 /* Select EDCA Access Category and TX ring for this frame. */ 2785 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2786 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2787 tid = qos & IEEE80211_QOS_TID; 2788 } else { 2789 qos = 0; 2790 tid = 0; 2791 } 2792 ac = M_WME_GETAC(m); 2793 2794 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2795 ni->ni_chan : ic->ic_curchan; 2796 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2797 2798 /* Choose a TX rate index. */ 2799 if (type == IEEE80211_FC0_TYPE_MGT) 2800 rate = tp->mgmtrate; 2801 else if (ismcast) 2802 rate = tp->mcastrate; 2803 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2804 rate = tp->ucastrate; 2805 else if (m->m_flags & M_EAPOL) 2806 rate = tp->mgmtrate; 2807 else { 2808 /* XXX pass pktlen */ 2809 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2810 rate = ni->ni_txrate; 2811 } 2812 2813 /* Encrypt the frame if need be. */ 2814 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2815 /* Retrieve key for TX. */ 2816 k = ieee80211_crypto_encap(ni, m); 2817 if (k == NULL) { 2818 error = ENOBUFS; 2819 goto fail; 2820 } 2821 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2822 2823 /* 802.11 header may have moved. */ 2824 wh = mtod(m, struct ieee80211_frame *); 2825 } 2826 totlen = m->m_pkthdr.len; 2827 2828 if (ieee80211_radiotap_active_vap(vap)) { 2829 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2830 2831 tap->wt_flags = 0; 2832 tap->wt_rate = rate; 2833 if (k != NULL) 2834 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2835 2836 ieee80211_radiotap_tx(vap, m); 2837 } 2838 2839 flags = 0; 2840 if (!ismcast) { 2841 /* Unicast frame, check if an ACK is expected. */ 2842 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2843 IEEE80211_QOS_ACKPOLICY_NOACK) 2844 flags |= WPI_TX_NEED_ACK; 2845 } 2846 2847 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2848 flags |= WPI_TX_AUTO_SEQ; 2849 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2850 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2851 2852 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2853 if (!ismcast) { 2854 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2855 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2856 flags |= WPI_TX_NEED_RTS; 2857 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2858 WPI_RATE_IS_OFDM(rate)) { 2859 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2860 flags |= WPI_TX_NEED_CTS; 2861 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2862 flags |= WPI_TX_NEED_RTS; 2863 } 2864 2865 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2866 flags |= WPI_TX_FULL_TXOP; 2867 } 2868 2869 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2870 if (type == IEEE80211_FC0_TYPE_MGT) { 2871 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2872 2873 /* Tell HW to set timestamp in probe responses. 
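 * The TSF value has to be the timer reading at the moment the frame
 * actually goes out on air, which only the hardware can supply.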
*/ 2874 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2875 flags |= WPI_TX_INSERT_TSTAMP; 2876 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2877 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2878 tx->timeout = htole16(3); 2879 else 2880 tx->timeout = htole16(2); 2881 } 2882 2883 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2884 tx->id = WPI_ID_BROADCAST; 2885 else { 2886 if (wn->id == WPI_ID_UNDEFINED) { 2887 device_printf(sc->sc_dev, 2888 "%s: undefined node id\n", __func__); 2889 error = EINVAL; 2890 goto fail; 2891 } 2892 2893 tx->id = wn->id; 2894 } 2895 2896 if (k != NULL && !swcrypt) { 2897 switch (k->wk_cipher->ic_cipher) { 2898 case IEEE80211_CIPHER_AES_CCM: 2899 tx->security = WPI_CIPHER_CCMP; 2900 break; 2901 2902 default: 2903 break; 2904 } 2905 2906 memcpy(tx->key, k->wk_key, k->wk_keylen); 2907 } 2908 2909 tx->len = htole16(totlen); 2910 tx->flags = htole32(flags); 2911 tx->plcp = rate2plcp(rate); 2912 tx->tid = tid; 2913 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2914 tx->ofdm_mask = 0xff; 2915 tx->cck_mask = 0x0f; 2916 tx->rts_ntries = 7; 2917 tx->data_ntries = tp->maxretry; 2918 2919 tx_data.ni = ni; 2920 tx_data.m = m; 2921 tx_data.size = sizeof(struct wpi_cmd_data); 2922 tx_data.code = WPI_CMD_TX_DATA; 2923 tx_data.ac = ac; 2924 2925 return wpi_cmd2(sc, &tx_data); 2926 2927 fail: m_freem(m); 2928 return error; 2929 } 2930 2931 static int 2932 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2933 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2934 { 2935 struct ieee80211vap *vap = ni->ni_vap; 2936 struct ieee80211_key *k = NULL; 2937 struct ieee80211_frame *wh; 2938 struct wpi_buf tx_data; 2939 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2940 uint32_t flags; 2941 uint8_t type; 2942 int ac, rate, swcrypt, totlen; 2943 2944 wh = mtod(m, struct ieee80211_frame *); 2945 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2946 2947 ac = params->ibp_pri & 3; 2948 2949 /* Choose a TX rate index. */ 2950 rate = params->ibp_rate0; 2951 2952 flags = 0; 2953 if (!IEEE80211_QOS_HAS_SEQ(wh)) 2954 flags |= WPI_TX_AUTO_SEQ; 2955 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2956 flags |= WPI_TX_NEED_ACK; 2957 if (params->ibp_flags & IEEE80211_BPF_RTS) 2958 flags |= WPI_TX_NEED_RTS; 2959 if (params->ibp_flags & IEEE80211_BPF_CTS) 2960 flags |= WPI_TX_NEED_CTS; 2961 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2962 flags |= WPI_TX_FULL_TXOP; 2963 2964 /* Encrypt the frame if need be. */ 2965 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2966 /* Retrieve key for TX. */ 2967 k = ieee80211_crypto_encap(ni, m); 2968 if (k == NULL) { 2969 m_freem(m); 2970 return ENOBUFS; 2971 } 2972 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2973 2974 /* 802.11 header may have moved. */ 2975 wh = mtod(m, struct ieee80211_frame *); 2976 } 2977 totlen = m->m_pkthdr.len; 2978 2979 if (ieee80211_radiotap_active_vap(vap)) { 2980 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2981 2982 tap->wt_flags = 0; 2983 tap->wt_rate = rate; 2984 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2985 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2986 2987 ieee80211_radiotap_tx(vap, m); 2988 } 2989 2990 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2991 if (type == IEEE80211_FC0_TYPE_MGT) { 2992 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2993 2994 /* Tell HW to set timestamp in probe responses. 
*/ 2995 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2996 flags |= WPI_TX_INSERT_TSTAMP; 2997 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2998 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2999 tx->timeout = htole16(3); 3000 else 3001 tx->timeout = htole16(2); 3002 } 3003 3004 if (k != NULL && !swcrypt) { 3005 switch (k->wk_cipher->ic_cipher) { 3006 case IEEE80211_CIPHER_AES_CCM: 3007 tx->security = WPI_CIPHER_CCMP; 3008 break; 3009 3010 default: 3011 break; 3012 } 3013 3014 memcpy(tx->key, k->wk_key, k->wk_keylen); 3015 } 3016 3017 tx->len = htole16(totlen); 3018 tx->flags = htole32(flags); 3019 tx->plcp = rate2plcp(rate); 3020 tx->id = WPI_ID_BROADCAST; 3021 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 3022 tx->rts_ntries = params->ibp_try1; 3023 tx->data_ntries = params->ibp_try0; 3024 3025 tx_data.ni = ni; 3026 tx_data.m = m; 3027 tx_data.size = sizeof(struct wpi_cmd_data); 3028 tx_data.code = WPI_CMD_TX_DATA; 3029 tx_data.ac = ac; 3030 3031 return wpi_cmd2(sc, &tx_data); 3032 } 3033 3034 static int 3035 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3036 const struct ieee80211_bpf_params *params) 3037 { 3038 struct ieee80211com *ic = ni->ni_ic; 3039 struct ifnet *ifp = ic->ic_ifp; 3040 struct wpi_softc *sc = ifp->if_softc; 3041 int error = 0; 3042 3043 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3044 3045 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3046 ieee80211_free_node(ni); 3047 m_freem(m); 3048 return ENETDOWN; 3049 } 3050 3051 WPI_TX_LOCK(sc); 3052 if (params == NULL) { 3053 /* 3054 * Legacy path; interpret frame contents to decide 3055 * precisely how to send the frame. 3056 */ 3057 error = wpi_tx_data(sc, m, ni); 3058 } else { 3059 /* 3060 * Caller supplied explicit parameters to use in 3061 * sending the frame. 
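 * (Raw transmit path: the rate, retry counts and protection flags
 * come straight from the caller's ieee80211_bpf_params, e.g. for
 * frames injected via BPF, rather than from negotiated station state.)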
3062 */ 3063 error = wpi_tx_data_raw(sc, m, ni, params); 3064 } 3065 WPI_TX_UNLOCK(sc); 3066 3067 if (error != 0) { 3068 /* NB: m is reclaimed on tx failure */ 3069 ieee80211_free_node(ni); 3070 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3071 3072 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3073 3074 return error; 3075 } 3076 3077 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3078 3079 return 0; 3080 } 3081 3082 /** 3083 * Process data waiting to be sent on the IFNET output queue 3084 */ 3085 static void 3086 wpi_start(struct ifnet *ifp) 3087 { 3088 struct wpi_softc *sc = ifp->if_softc; 3089 struct ieee80211_node *ni; 3090 struct mbuf *m; 3091 3092 WPI_TX_LOCK(sc); 3093 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 3094 3095 for (;;) { 3096 IF_LOCK(&ifp->if_snd); 3097 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 3098 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3099 IF_UNLOCK(&ifp->if_snd); 3100 break; 3101 } 3102 IF_UNLOCK(&ifp->if_snd); 3103 3104 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 3105 if (m == NULL) 3106 break; 3107 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3108 if (wpi_tx_data(sc, m, ni) != 0) { 3109 ieee80211_free_node(ni); 3110 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3111 } 3112 } 3113 3114 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 3115 WPI_TX_UNLOCK(sc); 3116 } 3117 3118 static void 3119 wpi_start_task(void *arg0, int pending) 3120 { 3121 struct wpi_softc *sc = arg0; 3122 struct ifnet *ifp = sc->sc_ifp; 3123 3124 wpi_start(ifp); 3125 } 3126 3127 static void 3128 wpi_watchdog_rfkill(void *arg) 3129 { 3130 struct wpi_softc *sc = arg; 3131 struct ifnet *ifp = sc->sc_ifp; 3132 struct ieee80211com *ic = ifp->if_l2com; 3133 3134 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 3135 3136 /* No need to lock firmware memory. */ 3137 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 3138 /* Radio kill switch is still off. */ 3139 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 3140 sc); 3141 } else 3142 ieee80211_runtask(ic, &sc->sc_radioon_task); 3143 } 3144 3145 static void 3146 wpi_scan_timeout(void *arg) 3147 { 3148 struct wpi_softc *sc = arg; 3149 struct ifnet *ifp = sc->sc_ifp; 3150 3151 if_printf(ifp, "scan timeout\n"); 3152 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3153 } 3154 3155 static void 3156 wpi_tx_timeout(void *arg) 3157 { 3158 struct wpi_softc *sc = arg; 3159 struct ifnet *ifp = sc->sc_ifp; 3160 3161 if_printf(ifp, "device timeout\n"); 3162 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 3163 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 3164 } 3165 3166 static int 3167 wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3168 { 3169 struct wpi_softc *sc = ifp->if_softc; 3170 struct ieee80211com *ic = ifp->if_l2com; 3171 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3172 struct ifreq *ifr = (struct ifreq *) data; 3173 int error = 0; 3174 3175 switch (cmd) { 3176 case SIOCGIFADDR: 3177 error = ether_ioctl(ifp, cmd, data); 3178 break; 3179 case SIOCSIFFLAGS: 3180 if (ifp->if_flags & IFF_UP) { 3181 wpi_init(sc); 3182 3183 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 && 3184 vap != NULL) 3185 ieee80211_stop(vap); 3186 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3187 wpi_stop(sc); 3188 break; 3189 case SIOCGIFMEDIA: 3190 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3191 break; 3192 default: 3193 error = EINVAL; 3194 break; 3195 } 3196 return error; 3197 } 3198 3199 /* 3200 * Send a command to the firmware. 
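 * A command that fits in the ring slot's wpi_tx_cmd is copied there
 * directly; larger commands are staged in a page-sized mbuf and
 * DMA-mapped separately.  With async == 0 the caller sleeps on the
 * slot until wpi_cmd_done() issues the wakeup, as in
 * wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async).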
3201 */ 3202 static int 3203 wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3204 int async) 3205 { 3206 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3207 struct wpi_tx_desc *desc; 3208 struct wpi_tx_data *data; 3209 struct wpi_tx_cmd *cmd; 3210 struct mbuf *m; 3211 bus_addr_t paddr; 3212 int totlen, error; 3213 3214 WPI_TXQ_LOCK(sc); 3215 3216 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3217 3218 if (sc->txq_active == 0) { 3219 /* wpi_stop() was called */ 3220 error = 0; 3221 goto fail; 3222 } 3223 3224 if (async == 0) 3225 WPI_LOCK_ASSERT(sc); 3226 3227 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3228 __func__, wpi_cmd_str(code), size, async); 3229 3230 desc = &ring->desc[ring->cur]; 3231 data = &ring->data[ring->cur]; 3232 totlen = 4 + size; 3233 3234 if (size > sizeof cmd->data) { 3235 /* Command is too large to fit in a descriptor. */ 3236 if (totlen > MCLBYTES) { 3237 error = EINVAL; 3238 goto fail; 3239 } 3240 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3241 if (m == NULL) { 3242 error = ENOMEM; 3243 goto fail; 3244 } 3245 cmd = mtod(m, struct wpi_tx_cmd *); 3246 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3247 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3248 if (error != 0) { 3249 m_freem(m); 3250 goto fail; 3251 } 3252 data->m = m; 3253 } else { 3254 cmd = &ring->cmd[ring->cur]; 3255 paddr = data->cmd_paddr; 3256 } 3257 3258 cmd->code = code; 3259 cmd->flags = 0; 3260 cmd->qid = ring->qid; 3261 cmd->idx = ring->cur; 3262 memcpy(cmd->data, buf, size); 3263 3264 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3265 desc->segs[0].addr = htole32(paddr); 3266 desc->segs[0].len = htole32(totlen); 3267 3268 if (size > sizeof cmd->data) { 3269 bus_dmamap_sync(ring->data_dmat, data->map, 3270 BUS_DMASYNC_PREWRITE); 3271 } else { 3272 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3273 BUS_DMASYNC_PREWRITE); 3274 } 3275 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3276 BUS_DMASYNC_PREWRITE); 3277 3278 /* Kick command ring. */ 3279 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3280 sc->sc_update_tx_ring(sc, ring); 3281 3282 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3283 3284 WPI_TXQ_UNLOCK(sc); 3285 3286 if (async) 3287 return 0; 3288 3289 return mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3290 3291 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3292 3293 WPI_TXQ_UNLOCK(sc); 3294 3295 return error; 3296 } 3297 3298 /* 3299 * Configure HW multi-rate retries. 3300 */ 3301 static int 3302 wpi_mrr_setup(struct wpi_softc *sc) 3303 { 3304 struct ifnet *ifp = sc->sc_ifp; 3305 struct ieee80211com *ic = ifp->if_l2com; 3306 struct wpi_mrr_setup mrr; 3307 int i, error; 3308 3309 /* CCK rates (not used with 802.11a). */ 3310 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3311 mrr.rates[i].flags = 0; 3312 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3313 /* Fallback to the immediate lower CCK rate (if any.) */ 3314 mrr.rates[i].next = 3315 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3316 /* Try twice at this rate before falling back to "next". */ 3317 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3318 } 3319 /* OFDM rates (not used with 802.11b). */ 3320 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3321 mrr.rates[i].flags = 0; 3322 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3323 /* Fallback to the immediate lower rate (if any.) */ 3324 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3325 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 
3326 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3327 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3328 i - 1; 3329 /* Try twice at this rate before falling back to "next". */ 3330 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3331 } 3332 /* Setup MRR for control frames. */ 3333 mrr.which = htole32(WPI_MRR_CTL); 3334 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3335 if (error != 0) { 3336 device_printf(sc->sc_dev, 3337 "could not setup MRR for control frames\n"); 3338 return error; 3339 } 3340 /* Setup MRR for data frames. */ 3341 mrr.which = htole32(WPI_MRR_DATA); 3342 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3343 if (error != 0) { 3344 device_printf(sc->sc_dev, 3345 "could not setup MRR for data frames\n"); 3346 return error; 3347 } 3348 return 0; 3349 } 3350 3351 static int 3352 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3353 { 3354 struct ieee80211com *ic = ni->ni_ic; 3355 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3356 struct wpi_node *wn = WPI_NODE(ni); 3357 struct wpi_node_info node; 3358 int error; 3359 3360 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3361 3362 if (wn->id == WPI_ID_UNDEFINED) 3363 return EINVAL; 3364 3365 memset(&node, 0, sizeof node); 3366 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3367 node.id = wn->id; 3368 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3369 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3370 node.action = htole32(WPI_ACTION_SET_RATE); 3371 node.antenna = WPI_ANTENNA_BOTH; 3372 3373 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3374 wn->id, ether_sprintf(ni->ni_macaddr)); 3375 3376 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3377 if (error != 0) { 3378 device_printf(sc->sc_dev, 3379 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3380 error); 3381 return error; 3382 } 3383 3384 if (wvp->wv_gtk != 0) { 3385 error = wpi_set_global_keys(ni); 3386 if (error != 0) { 3387 device_printf(sc->sc_dev, 3388 "%s: error while setting global keys\n", __func__); 3389 return ENXIO; 3390 } 3391 } 3392 3393 return 0; 3394 } 3395 3396 /* 3397 * Broadcast node is used to send group-addressed and management frames. 3398 */ 3399 static int 3400 wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3401 { 3402 struct ifnet *ifp = sc->sc_ifp; 3403 struct ieee80211com *ic = ifp->if_l2com; 3404 struct wpi_node_info node; 3405 3406 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3407 3408 memset(&node, 0, sizeof node); 3409 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3410 node.id = WPI_ID_BROADCAST; 3411 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
3412 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3413 node.action = htole32(WPI_ACTION_SET_RATE); 3414 node.antenna = WPI_ANTENNA_BOTH; 3415 3416 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3417 3418 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3419 } 3420 3421 static int 3422 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3423 { 3424 struct wpi_node *wn = WPI_NODE(ni); 3425 int error; 3426 3427 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3428 3429 wn->id = wpi_add_node_entry_sta(sc); 3430 3431 if ((error = wpi_add_node(sc, ni)) != 0) { 3432 wpi_del_node_entry(sc, wn->id); 3433 wn->id = WPI_ID_UNDEFINED; 3434 return error; 3435 } 3436 3437 return 0; 3438 } 3439 3440 static int 3441 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3442 { 3443 struct wpi_node *wn = WPI_NODE(ni); 3444 int error; 3445 3446 KASSERT(wn->id == WPI_ID_UNDEFINED, 3447 ("the node %d was added before", wn->id)); 3448 3449 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3450 3451 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3452 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3453 return ENOMEM; 3454 } 3455 3456 if ((error = wpi_add_node(sc, ni)) != 0) { 3457 wpi_del_node_entry(sc, wn->id); 3458 wn->id = WPI_ID_UNDEFINED; 3459 return error; 3460 } 3461 3462 return 0; 3463 } 3464 3465 static void 3466 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3467 { 3468 struct wpi_node *wn = WPI_NODE(ni); 3469 struct wpi_cmd_del_node node; 3470 int error; 3471 3472 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3473 3474 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3475 3476 memset(&node, 0, sizeof node); 3477 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3478 node.count = 1; 3479 3480 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3481 wn->id, ether_sprintf(ni->ni_macaddr)); 3482 3483 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3484 if (error != 0) { 3485 device_printf(sc->sc_dev, 3486 "%s: could not delete node %u, error %d\n", __func__, 3487 wn->id, error); 3488 } 3489 } 3490 3491 static int 3492 wpi_updateedca(struct ieee80211com *ic) 3493 { 3494 #define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3495 struct wpi_softc *sc = ic->ic_ifp->if_softc; 3496 struct wpi_edca_params cmd; 3497 int aci, error; 3498 3499 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3500 3501 memset(&cmd, 0, sizeof cmd); 3502 cmd.flags = htole32(WPI_EDCA_UPDATE); 3503 for (aci = 0; aci < WME_NUM_AC; aci++) { 3504 const struct wmeParams *ac = 3505 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3506 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3507 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3508 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3509 cmd.ac[aci].txoplimit = 3510 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3511 3512 DPRINTF(sc, WPI_DEBUG_EDCA, 3513 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3514 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3515 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3516 cmd.ac[aci].txoplimit); 3517 } 3518 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3519 3520 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3521 3522 return error; 3523 #undef WPI_EXP2 3524 } 3525 3526 static void 3527 wpi_set_promisc(struct wpi_softc *sc) 3528 { 3529 struct ifnet *ifp = sc->sc_ifp; 3530 struct ieee80211com *ic = 
ifp->if_l2com; 3531 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3532 uint32_t promisc_filter; 3533 3534 promisc_filter = WPI_FILTER_CTL; 3535 if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP) 3536 promisc_filter |= WPI_FILTER_PROMISC; 3537 3538 if (ifp->if_flags & IFF_PROMISC) 3539 sc->rxon.filter |= htole32(promisc_filter); 3540 else 3541 sc->rxon.filter &= ~htole32(promisc_filter); 3542 } 3543 3544 static void 3545 wpi_update_promisc(struct ieee80211com *ic) 3546 { 3547 struct wpi_softc *sc = ic->ic_softc; 3548 3549 WPI_RXON_LOCK(sc); 3550 wpi_set_promisc(sc); 3551 3552 if (wpi_send_rxon(sc, 1, 1) != 0) { 3553 device_printf(sc->sc_dev, "%s: could not send RXON\n", 3554 __func__); 3555 } 3556 WPI_RXON_UNLOCK(sc); 3557 } 3558 3559 static void 3560 wpi_update_mcast(struct ieee80211com *ic) 3561 { 3562 /* Ignore */ 3563 } 3564 3565 static void 3566 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3567 { 3568 struct wpi_cmd_led led; 3569 3570 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3571 3572 led.which = which; 3573 led.unit = htole32(100000); /* on/off in unit of 100ms */ 3574 led.off = off; 3575 led.on = on; 3576 (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); 3577 } 3578 3579 static int 3580 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni) 3581 { 3582 struct wpi_cmd_timing cmd; 3583 uint64_t val, mod; 3584 3585 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3586 3587 memset(&cmd, 0, sizeof cmd); 3588 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3589 cmd.bintval = htole16(ni->ni_intval); 3590 cmd.lintval = htole16(10); 3591 3592 /* Compute remaining time until next beacon. */ 3593 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 3594 mod = le64toh(cmd.tstamp) % val; 3595 cmd.binitval = htole32((uint32_t)(val - mod)); 3596 3597 DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3598 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3599 3600 return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1); 3601 } 3602 3603 /* 3604 * This function is called periodically (every 60 seconds) to adjust output 3605 * power to temperature changes. 3606 */ 3607 static void 3608 wpi_power_calibration(struct wpi_softc *sc) 3609 { 3610 int temp; 3611 3612 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3613 3614 /* Update sensor data. */ 3615 temp = (int)WPI_READ(sc, WPI_UCODE_GP2); 3616 DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp); 3617 3618 /* Sanity-check read value. */ 3619 if (temp < -260 || temp > 25) { 3620 /* This can't be correct, ignore. */ 3621 DPRINTF(sc, WPI_DEBUG_TEMP, 3622 "out-of-range temperature reported: %d\n", temp); 3623 return; 3624 } 3625 3626 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp); 3627 3628 /* Adjust Tx power if need be. */ 3629 if (abs(temp - sc->temp) <= 6) 3630 return; 3631 3632 sc->temp = temp; 3633 3634 if (wpi_set_txpower(sc, 1) != 0) { 3635 /* just warn, too bad for the automatic calibration... */ 3636 device_printf(sc->sc_dev,"could not adjust Tx power\n"); 3637 } 3638 } 3639 3640 /* 3641 * Set TX power for current channel. 3642 */ 3643 static int 3644 wpi_set_txpower(struct wpi_softc *sc, int async) 3645 { 3646 struct wpi_power_group *group; 3647 struct wpi_cmd_txpower cmd; 3648 uint8_t chan; 3649 int idx, is_chan_5ghz, i; 3650 3651 /* Retrieve current channel from last RXON. 
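 * The EEPROM provides one calibration group for the 2GHz band and
 * several more that split the 5GHz band, each tagged with the highest
 * channel it covers, hence the "chan <= group->chan" search below.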
*/ 3652 chan = sc->rxon.chan; 3653 is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0; 3654 3655 /* Find the TX power group to which this channel belongs. */ 3656 if (is_chan_5ghz) { 3657 for (group = &sc->groups[1]; group < &sc->groups[4]; group++) 3658 if (chan <= group->chan) 3659 break; 3660 } else 3661 group = &sc->groups[0]; 3662 3663 memset(&cmd, 0, sizeof cmd); 3664 cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ; 3665 cmd.chan = htole16(chan); 3666 3667 /* Set TX power for all OFDM and CCK rates. */ 3668 for (i = 0; i <= WPI_RIDX_MAX ; i++) { 3669 /* Retrieve TX power for this channel/rate. */ 3670 idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i); 3671 3672 cmd.rates[i].plcp = wpi_ridx_to_plcp[i]; 3673 3674 if (is_chan_5ghz) { 3675 cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx]; 3676 cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx]; 3677 } else { 3678 cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx]; 3679 cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx]; 3680 } 3681 DPRINTF(sc, WPI_DEBUG_TEMP, 3682 "chan %d/ridx %d: power index %d\n", chan, i, idx); 3683 } 3684 3685 return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async); 3686 } 3687 3688 /* 3689 * Determine Tx power index for a given channel/rate combination. 3690 * This takes into account the regulatory information from EEPROM and the 3691 * current temperature. 3692 */ 3693 static int 3694 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, 3695 uint8_t chan, int is_chan_5ghz, int ridx) 3696 { 3697 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3698 #define fdivround(a, b, n) \ 3699 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3700 3701 /* Linear interpolation. */ 3702 #define interpolate(x, x1, y1, x2, y2, n) \ 3703 ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3704 3705 struct wpi_power_sample *sample; 3706 int pwr, idx; 3707 3708 /* Default TX power is group maximum TX power minus 3dB. */ 3709 pwr = group->maxpwr / 2; 3710 3711 /* Decrease TX power for highest OFDM rates to reduce distortion. */ 3712 switch (ridx) { 3713 case WPI_RIDX_OFDM36: 3714 pwr -= is_chan_5ghz ? 5 : 0; 3715 break; 3716 case WPI_RIDX_OFDM48: 3717 pwr -= is_chan_5ghz ? 10 : 7; 3718 break; 3719 case WPI_RIDX_OFDM54: 3720 pwr -= is_chan_5ghz ? 12 : 9; 3721 break; 3722 } 3723 3724 /* Never exceed the channel maximum allowed TX power. */ 3725 pwr = min(pwr, sc->maxpwr[chan]); 3726 3727 /* Retrieve TX power index into gain tables from samples. */ 3728 for (sample = group->samples; sample < &group->samples[3]; sample++) 3729 if (pwr > sample[1].power) 3730 break; 3731 /* Fixed-point linear interpolation using a 19-bit fractional part. */ 3732 idx = interpolate(pwr, sample[0].power, sample[0].index, 3733 sample[1].power, sample[1].index, 19); 3734 3735 /*- 3736 * Adjust power index based on current temperature: 3737 * - if cooler than factory-calibrated: decrease output power 3738 * - if warmer than factory-calibrated: increase output power 3739 */ 3740 idx -= (sc->temp - group->temp) * 11 / 100; 3741 3742 /* Decrease TX power for CCK rates (-5dB). */ 3743 if (ridx >= WPI_RIDX_CCK1) 3744 idx += 10; 3745 3746 /* Make sure idx stays in a valid range. */ 3747 if (idx < 0) 3748 return 0; 3749 if (idx > WPI_MAX_PWR_INDEX) 3750 return WPI_MAX_PWR_INDEX; 3751 return idx; 3752 3753 #undef interpolate 3754 #undef fdivround 3755 } 3756 3757 /* 3758 * Set STA mode power saving level (between 0 and 5). 
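 * The level indexes the wpi_pmgt parameter tables (RX/TX timeouts and
 * DTIM skip counts); e.g. with dtim = 3 and skip_dtim = 2 the sleep
 * interval is capped at 3 * (2 + 1) = 9 beacon intervals when the
 * table's last interval is the -1 sentinel.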
3759 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 3760 */ 3761 static int 3762 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async) 3763 { 3764 struct wpi_pmgt_cmd cmd; 3765 const struct wpi_pmgt *pmgt; 3766 uint32_t max, skip_dtim; 3767 uint32_t reg; 3768 int i; 3769 3770 DPRINTF(sc, WPI_DEBUG_PWRSAVE, 3771 "%s: dtim=%d, level=%d, async=%d\n", 3772 __func__, dtim, level, async); 3773 3774 /* Select which PS parameters to use. */ 3775 if (dtim <= 10) 3776 pmgt = &wpi_pmgt[0][level]; 3777 else 3778 pmgt = &wpi_pmgt[1][level]; 3779 3780 memset(&cmd, 0, sizeof cmd); 3781 WPI_TXQ_LOCK(sc); 3782 if (level != 0) { /* not CAM */ 3783 cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP); 3784 sc->sc_flags |= WPI_PS_PATH; 3785 } else 3786 sc->sc_flags &= ~WPI_PS_PATH; 3787 WPI_TXQ_UNLOCK(sc); 3788 /* Retrieve PCIe Active State Power Management (ASPM). */ 3789 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 3790 if (!(reg & 0x1)) /* L0s Entry disabled. */ 3791 cmd.flags |= htole16(WPI_PS_PCI_PMGT); 3792 3793 cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU); 3794 cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU); 3795 3796 if (dtim == 0) { 3797 dtim = 1; 3798 skip_dtim = 0; 3799 } else 3800 skip_dtim = pmgt->skip_dtim; 3801 3802 if (skip_dtim != 0) { 3803 cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM); 3804 max = pmgt->intval[4]; 3805 if (max == (uint32_t)-1) 3806 max = dtim * (skip_dtim + 1); 3807 else if (max > dtim) 3808 max = (max / dtim) * dtim; 3809 } else 3810 max = dtim; 3811 3812 for (i = 0; i < 5; i++) 3813 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 3814 3815 return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 3816 } 3817 3818 static int 3819 wpi_send_btcoex(struct wpi_softc *sc) 3820 { 3821 struct wpi_bluetooth cmd; 3822 3823 memset(&cmd, 0, sizeof cmd); 3824 cmd.flags = WPI_BT_COEX_MODE_4WIRE; 3825 cmd.lead_time = WPI_BT_LEAD_TIME_DEF; 3826 cmd.max_kill = WPI_BT_MAX_KILL_DEF; 3827 DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 3828 __func__); 3829 return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 3830 } 3831 3832 static int 3833 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async) 3834 { 3835 int error; 3836 3837 if (async) 3838 WPI_RXON_LOCK_ASSERT(sc); 3839 3840 if (assoc && wpi_check_bss_filter(sc) != 0) { 3841 struct wpi_assoc rxon_assoc; 3842 3843 rxon_assoc.flags = sc->rxon.flags; 3844 rxon_assoc.filter = sc->rxon.filter; 3845 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 3846 rxon_assoc.cck_mask = sc->rxon.cck_mask; 3847 rxon_assoc.reserved = 0; 3848 3849 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3850 sizeof (struct wpi_assoc), async); 3851 if (error != 0) { 3852 device_printf(sc->sc_dev, 3853 "RXON_ASSOC command failed, error %d\n", error); 3854 return error; 3855 } 3856 } else { 3857 if (async) { 3858 WPI_NT_LOCK(sc); 3859 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3860 sizeof (struct wpi_rxon), async); 3861 if (error == 0) 3862 wpi_clear_node_table(sc); 3863 WPI_NT_UNLOCK(sc); 3864 } else { 3865 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3866 sizeof (struct wpi_rxon), async); 3867 if (error == 0) 3868 wpi_clear_node_table(sc); 3869 } 3870 3871 if (error != 0) { 3872 device_printf(sc->sc_dev, 3873 "RXON command failed, error %d\n", error); 3874 return error; 3875 } 3876 3877 /* Add broadcast node. 
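 * A full RXON wipes the firmware node table (wpi_clear_node_table()
 * above), so the broadcast entry must be re-installed before any
 * management or group-addressed frame can be sent.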
*/
3878 error = wpi_add_broadcast_node(sc, async);
3879 if (error != 0) {
3880 device_printf(sc->sc_dev,
3881 "could not add broadcast node, error %d\n", error);
3882 return error;
3883 }
3884 }
3885
3886 /* Configuration has changed, set Tx power accordingly. */
3887 if ((error = wpi_set_txpower(sc, async)) != 0) {
3888 device_printf(sc->sc_dev,
3889 "%s: could not set TX power, error %d\n", __func__, error);
3890 return error;
3891 }
3892
3893 return 0;
3894 }
3895
3896 /**
3897 * Configure the card to listen to a particular channel; this transitions the
3898 * card into being able to receive frames from remote devices.
3899 */
3900 static int
3901 wpi_config(struct wpi_softc *sc)
3902 {
3903 struct ifnet *ifp = sc->sc_ifp;
3904 struct ieee80211com *ic = ifp->if_l2com;
3905 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3906 struct ieee80211_channel *c = ic->ic_curchan;
3907 int error;
3908
3909 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3910
3911 /* Set power saving level to CAM during initialization. */
3912 if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
3913 device_printf(sc->sc_dev,
3914 "%s: could not set power saving level\n", __func__);
3915 return error;
3916 }
3917
3918 /* Configure bluetooth coexistence. */
3919 if ((error = wpi_send_btcoex(sc)) != 0) {
3920 device_printf(sc->sc_dev,
3921 "could not configure bluetooth coexistence\n");
3922 return error;
3923 }
3924
3925 /* Configure adapter. */
3926 memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
3927 IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
3928
3929 /* Set default channel. */
3930 sc->rxon.chan = ieee80211_chan2ieee(ic, c);
3931 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
3932 if (IEEE80211_IS_CHAN_2GHZ(c))
3933 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
3934
3935 sc->rxon.filter = WPI_FILTER_MULTICAST;
3936 switch (ic->ic_opmode) {
3937 case IEEE80211_M_STA:
3938 sc->rxon.mode = WPI_MODE_STA;
3939 break;
3940 case IEEE80211_M_IBSS:
3941 sc->rxon.mode = WPI_MODE_IBSS;
3942 sc->rxon.filter |= WPI_FILTER_BEACON;
3943 break;
3944 case IEEE80211_M_HOSTAP:
3945 /* XXX workaround for beaconing */
3946 sc->rxon.mode = WPI_MODE_IBSS;
3947 sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
3948 break;
3949 case IEEE80211_M_AHDEMO:
3950 sc->rxon.mode = WPI_MODE_HOSTAP;
3951 break;
3952 case IEEE80211_M_MONITOR:
3953 sc->rxon.mode = WPI_MODE_MONITOR;
3954 break;
3955 default:
3956 device_printf(sc->sc_dev, "unknown opmode %d\n",
3957 ic->ic_opmode);
3958 return EINVAL;
3959 }
3960 sc->rxon.filter = htole32(sc->rxon.filter);
3961 wpi_set_promisc(sc);
3962 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
3963 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
3964
3965 /* XXX Current configuration may be unusable. */
3966 if (IEEE80211_IS_CHAN_NOADHOC(c) && sc->rxon.mode == WPI_MODE_IBSS) {
3967 device_printf(sc->sc_dev,
3968 "%s: invalid channel (%d) selected for IBSS mode\n",
3969 __func__, ieee80211_chan2ieee(ic, c));
3970 return EINVAL;
3971 }
3972
3973 if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
3974 device_printf(sc->sc_dev, "%s: could not send RXON\n",
3975 __func__);
3976 return error;
3977 }
3978
3979 /* Set up rate scaling.
*/ 3980 if ((error = wpi_mrr_setup(sc)) != 0) { 3981 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3982 error); 3983 return error; 3984 } 3985 3986 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3987 3988 return 0; 3989 } 3990 3991 static uint16_t 3992 wpi_get_active_dwell_time(struct wpi_softc *sc, 3993 struct ieee80211_channel *c, uint8_t n_probes) 3994 { 3995 /* No channel? Default to 2GHz settings. */ 3996 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3997 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3998 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3999 } 4000 4001 /* 5GHz dwell time. */ 4002 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 4003 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 4004 } 4005 4006 /* 4007 * Limit the total dwell time. 4008 * 4009 * Returns the dwell time in milliseconds. 4010 */ 4011 static uint16_t 4012 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 4013 { 4014 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 4015 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4016 int bintval = 0; 4017 4018 /* bintval is in TU (1.024mS) */ 4019 if (vap != NULL) 4020 bintval = vap->iv_bss->ni_intval; 4021 4022 /* 4023 * If it's non-zero, we should calculate the minimum of 4024 * it and the DWELL_BASE. 4025 * 4026 * XXX Yes, the math should take into account that bintval 4027 * is 1.024mS, not 1mS.. 4028 */ 4029 if (bintval > 0) { 4030 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 4031 bintval); 4032 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 4033 } 4034 4035 /* No association context? Default. */ 4036 return dwell_time; 4037 } 4038 4039 static uint16_t 4040 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 4041 { 4042 uint16_t passive; 4043 4044 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 4045 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 4046 else 4047 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 4048 4049 /* Clamp to the beacon interval if we're associated. */ 4050 return (wpi_limit_dwell(sc, passive)); 4051 } 4052 4053 static uint32_t 4054 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval) 4055 { 4056 uint32_t mod = (time % bintval) * IEEE80211_DUR_TU; 4057 uint32_t nbeacons = time / bintval; 4058 4059 if (mod > WPI_PAUSE_MAX_TIME) 4060 mod = WPI_PAUSE_MAX_TIME; 4061 4062 return WPI_PAUSE_SCAN(nbeacons, mod); 4063 } 4064 4065 /* 4066 * Send a scan request to the firmware. 4067 */ 4068 static int 4069 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 4070 { 4071 struct ifnet *ifp = sc->sc_ifp; 4072 struct ieee80211com *ic = ifp->if_l2com; 4073 struct ieee80211_scan_state *ss = ic->ic_scan; 4074 struct ieee80211vap *vap = ss->ss_vap; 4075 struct wpi_scan_hdr *hdr; 4076 struct wpi_cmd_data *tx; 4077 struct wpi_scan_essid *essids; 4078 struct wpi_scan_chan *chan; 4079 struct ieee80211_frame *wh; 4080 struct ieee80211_rateset *rs; 4081 uint16_t dwell_active, dwell_passive; 4082 uint8_t *buf, *frm; 4083 int bgscan, bintval, buflen, error, i, nssid; 4084 4085 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4086 4087 /* 4088 * We are absolutely not allowed to send a scan command when another 4089 * scan command is pending. 
4090 */ 4091 if (callout_pending(&sc->scan_timeout)) { 4092 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 4093 __func__); 4094 error = EAGAIN; 4095 goto fail; 4096 } 4097 4098 bgscan = wpi_check_bss_filter(sc); 4099 bintval = vap->iv_bss->ni_intval; 4100 if (bgscan != 0 && 4101 bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) { 4102 error = EOPNOTSUPP; 4103 goto fail; 4104 } 4105 4106 buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4107 if (buf == NULL) { 4108 device_printf(sc->sc_dev, 4109 "%s: could not allocate buffer for scan command\n", 4110 __func__); 4111 error = ENOMEM; 4112 goto fail; 4113 } 4114 hdr = (struct wpi_scan_hdr *)buf; 4115 4116 /* 4117 * Move to the next channel if no packets are received within 10 msecs 4118 * after sending the probe request. 4119 */ 4120 hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT); 4121 hdr->quiet_threshold = htole16(1); 4122 4123 if (bgscan != 0) { 4124 /* 4125 * Max needs to be greater than active and passive and quiet! 4126 * It's also in microseconds! 4127 */ 4128 hdr->max_svc = htole32(250 * IEEE80211_DUR_TU); 4129 hdr->pause_svc = htole32(wpi_get_scan_pause_time(100, 4130 bintval)); 4131 } 4132 4133 hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON); 4134 4135 tx = (struct wpi_cmd_data *)(hdr + 1); 4136 tx->flags = htole32(WPI_TX_AUTO_SEQ); 4137 tx->id = WPI_ID_BROADCAST; 4138 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 4139 4140 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4141 /* Send probe requests at 6Mbps. */ 4142 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6]; 4143 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4144 } else { 4145 hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO); 4146 /* Send probe requests at 1Mbps. */ 4147 tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4148 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4149 } 4150 4151 essids = (struct wpi_scan_essid *)(tx + 1); 4152 nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); 4153 for (i = 0; i < nssid; i++) { 4154 essids[i].id = IEEE80211_ELEMID_SSID; 4155 essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN); 4156 memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len); 4157 #ifdef WPI_DEBUG 4158 if (sc->sc_debug & WPI_DEBUG_SCAN) { 4159 printf("Scanning Essid: "); 4160 ieee80211_print_essid(essids[i].data, essids[i].len); 4161 printf("\n"); 4162 } 4163 #endif 4164 } 4165 4166 /* 4167 * Build a probe request frame. Most of the following code is a 4168 * copy & paste of what is done in net80211. 4169 */ 4170 wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS); 4171 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4172 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4173 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4174 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 4175 IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); 4176 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 4177 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ 4178 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ 4179 4180 frm = (uint8_t *)(wh + 1); 4181 frm = ieee80211_add_ssid(frm, NULL, 0); 4182 frm = ieee80211_add_rates(frm, rs); 4183 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 4184 frm = ieee80211_add_xrates(frm, rs); 4185 4186 /* Set length of probe request. */ 4187 tx->len = htole16(frm - (uint8_t *)wh); 4188 4189 /* 4190 * Construct information about the channel that we 4191 * want to scan. 
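 * (As built above, the scan command consists of a wpi_scan_hdr followed
 * by a wpi_cmd_data TX command, WPI_SCAN_MAX_ESSIDS essid entries, the
 * probe request frame and, finally, one wpi_scan_chan entry per channel.)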
The firmware expects this to be directly 4192 * after the scan probe request 4193 */ 4194 chan = (struct wpi_scan_chan *)frm; 4195 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4196 chan->flags = 0; 4197 if (nssid) { 4198 hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT; 4199 chan->flags |= WPI_CHAN_NPBREQS(nssid); 4200 } else 4201 hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER; 4202 4203 if (!IEEE80211_IS_CHAN_PASSIVE(c)) 4204 chan->flags |= WPI_CHAN_ACTIVE; 4205 4206 /* 4207 * Calculate the active/passive dwell times. 4208 */ 4209 4210 dwell_active = wpi_get_active_dwell_time(sc, c, nssid); 4211 dwell_passive = wpi_get_passive_dwell_time(sc, c); 4212 4213 /* Make sure they're valid. */ 4214 if (dwell_active > dwell_passive) 4215 dwell_active = dwell_passive; 4216 4217 chan->active = htole16(dwell_active); 4218 chan->passive = htole16(dwell_passive); 4219 4220 chan->dsp_gain = 0x6e; /* Default level */ 4221 4222 if (IEEE80211_IS_CHAN_5GHZ(c)) 4223 chan->rf_gain = 0x3b; 4224 else 4225 chan->rf_gain = 0x28; 4226 4227 DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n", 4228 chan->chan, IEEE80211_IS_CHAN_PASSIVE(c)); 4229 4230 hdr->nchan++; 4231 4232 if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) { 4233 /* XXX Force probe request transmission. */ 4234 memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan)); 4235 4236 chan++; 4237 4238 /* Reduce unnecessary delay. */ 4239 chan->flags = 0; 4240 chan->passive = chan->active = hdr->quiet_time; 4241 4242 hdr->nchan++; 4243 } 4244 4245 chan++; 4246 4247 buflen = (uint8_t *)chan - buf; 4248 hdr->len = htole16(buflen); 4249 4250 DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n", 4251 hdr->nchan); 4252 error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1); 4253 free(buf, M_DEVBUF); 4254 4255 if (error != 0) 4256 goto fail; 4257 4258 callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc); 4259 4260 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4261 4262 return 0; 4263 4264 fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 4265 4266 return error; 4267 } 4268 4269 static int 4270 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) 4271 { 4272 struct ieee80211com *ic = vap->iv_ic; 4273 struct ieee80211_node *ni = vap->iv_bss; 4274 struct ieee80211_channel *c = ni->ni_chan; 4275 int error; 4276 4277 WPI_RXON_LOCK(sc); 4278 4279 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4280 4281 /* Update adapter configuration. */ 4282 sc->rxon.associd = 0; 4283 sc->rxon.filter &= ~htole32(WPI_FILTER_BSS); 4284 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4285 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4286 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4287 if (IEEE80211_IS_CHAN_2GHZ(c)) 4288 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4289 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4290 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4291 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4292 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4293 if (IEEE80211_IS_CHAN_A(c)) { 4294 sc->rxon.cck_mask = 0; 4295 sc->rxon.ofdm_mask = 0x15; 4296 } else if (IEEE80211_IS_CHAN_B(c)) { 4297 sc->rxon.cck_mask = 0x03; 4298 sc->rxon.ofdm_mask = 0; 4299 } else { 4300 /* Assume 802.11b/g. 
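 * The masks below appear to select the 1/2/5.5/11 Mbps CCK rates (0x0f)
 * and the 6/12/24 Mbps OFDM basic rates (0x15).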
*/ 4301 sc->rxon.cck_mask = 0x0f; 4302 sc->rxon.ofdm_mask = 0x15; 4303 } 4304 4305 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 4306 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 4307 sc->rxon.ofdm_mask); 4308 4309 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4310 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4311 __func__); 4312 } 4313 4314 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4315 4316 WPI_RXON_UNLOCK(sc); 4317 4318 return error; 4319 } 4320 4321 static int 4322 wpi_config_beacon(struct wpi_vap *wvp) 4323 { 4324 struct ieee80211com *ic = wvp->wv_vap.iv_ic; 4325 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4326 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4327 struct wpi_softc *sc = ic->ic_ifp->if_softc; 4328 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 4329 struct ieee80211_tim_ie *tie; 4330 struct mbuf *m; 4331 uint8_t *ptr; 4332 int error; 4333 4334 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4335 4336 WPI_VAP_LOCK_ASSERT(wvp); 4337 4338 cmd->len = htole16(bcn->m->m_pkthdr.len); 4339 cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 4340 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 4341 4342 /* XXX seems to be unused */ 4343 if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) { 4344 tie = (struct ieee80211_tim_ie *) bo->bo_tim; 4345 ptr = mtod(bcn->m, uint8_t *); 4346 4347 cmd->tim = htole16(bo->bo_tim - ptr); 4348 cmd->timsz = tie->tim_len; 4349 } 4350 4351 /* Necessary for recursion in ieee80211_beacon_update(). */ 4352 m = bcn->m; 4353 bcn->m = m_dup(m, M_NOWAIT); 4354 if (bcn->m == NULL) { 4355 device_printf(sc->sc_dev, 4356 "%s: could not copy beacon frame\n", __func__); 4357 error = ENOMEM; 4358 goto end; 4359 } 4360 4361 if ((error = wpi_cmd2(sc, bcn)) != 0) { 4362 device_printf(sc->sc_dev, 4363 "%s: could not update beacon frame, error %d", __func__, 4364 error); 4365 } 4366 4367 /* Restore mbuf. 
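 * (The duplicate created above was handed to wpi_cmd2(); the original is
 * put back so that subsequent beacon updates keep operating on the same
 * frame.)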
*/ 4368 end: bcn->m = m; 4369 4370 return error; 4371 } 4372 4373 static int 4374 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) 4375 { 4376 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 4377 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4378 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4379 struct mbuf *m; 4380 int error; 4381 4382 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4383 4384 if (ni->ni_chan == IEEE80211_CHAN_ANYC) 4385 return EINVAL; 4386 4387 m = ieee80211_beacon_alloc(ni, bo); 4388 if (m == NULL) { 4389 device_printf(sc->sc_dev, 4390 "%s: could not allocate beacon frame\n", __func__); 4391 return ENOMEM; 4392 } 4393 4394 WPI_VAP_LOCK(wvp); 4395 if (bcn->m != NULL) 4396 m_freem(bcn->m); 4397 4398 bcn->m = m; 4399 4400 error = wpi_config_beacon(wvp); 4401 WPI_VAP_UNLOCK(wvp); 4402 4403 return error; 4404 } 4405 4406 static void 4407 wpi_update_beacon(struct ieee80211vap *vap, int item) 4408 { 4409 struct wpi_softc *sc = vap->iv_ic->ic_ifp->if_softc; 4410 struct wpi_vap *wvp = WPI_VAP(vap); 4411 struct wpi_buf *bcn = &wvp->wv_bcbuf; 4412 struct ieee80211_beacon_offsets *bo = &wvp->wv_boff; 4413 struct ieee80211_node *ni = vap->iv_bss; 4414 int mcast = 0; 4415 4416 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4417 4418 WPI_VAP_LOCK(wvp); 4419 if (bcn->m == NULL) { 4420 bcn->m = ieee80211_beacon_alloc(ni, bo); 4421 if (bcn->m == NULL) { 4422 device_printf(sc->sc_dev, 4423 "%s: could not allocate beacon frame\n", __func__); 4424 4425 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, 4426 __func__); 4427 4428 WPI_VAP_UNLOCK(wvp); 4429 return; 4430 } 4431 } 4432 WPI_VAP_UNLOCK(wvp); 4433 4434 if (item == IEEE80211_BEACON_TIM) 4435 mcast = 1; /* TODO */ 4436 4437 setbit(bo->bo_flags, item); 4438 ieee80211_beacon_update(ni, bo, bcn->m, mcast); 4439 4440 WPI_VAP_LOCK(wvp); 4441 wpi_config_beacon(wvp); 4442 WPI_VAP_UNLOCK(wvp); 4443 4444 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4445 } 4446 4447 static void 4448 wpi_newassoc(struct ieee80211_node *ni, int isnew) 4449 { 4450 struct ieee80211vap *vap = ni->ni_vap; 4451 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4452 struct wpi_node *wn = WPI_NODE(ni); 4453 int error; 4454 4455 WPI_NT_LOCK(sc); 4456 4457 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4458 4459 if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) { 4460 if ((error = wpi_add_ibss_node(sc, ni)) != 0) { 4461 device_printf(sc->sc_dev, 4462 "%s: could not add IBSS node, error %d\n", 4463 __func__, error); 4464 } 4465 } 4466 WPI_NT_UNLOCK(sc); 4467 } 4468 4469 static int 4470 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) 4471 { 4472 struct ieee80211com *ic = vap->iv_ic; 4473 struct ieee80211_node *ni = vap->iv_bss; 4474 struct ieee80211_channel *c = ni->ni_chan; 4475 int error; 4476 4477 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 4478 4479 if (vap->iv_opmode == IEEE80211_M_MONITOR) { 4480 /* Link LED blinks while monitoring. */ 4481 wpi_set_led(sc, WPI_LED_LINK, 5, 5); 4482 return 0; 4483 } 4484 4485 /* XXX kernel panic workaround */ 4486 if (c == IEEE80211_CHAN_ANYC) { 4487 device_printf(sc->sc_dev, "%s: incomplete configuration\n", 4488 __func__); 4489 return EINVAL; 4490 } 4491 4492 if ((error = wpi_set_timing(sc, ni)) != 0) { 4493 device_printf(sc->sc_dev, 4494 "%s: could not set timing, error %d\n", __func__, error); 4495 return error; 4496 } 4497 4498 /* Update adapter configuration. 
*/ 4499 WPI_RXON_LOCK(sc); 4500 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4501 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4502 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4503 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4504 if (IEEE80211_IS_CHAN_2GHZ(c)) 4505 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4506 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4507 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4508 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4509 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4510 if (IEEE80211_IS_CHAN_A(c)) { 4511 sc->rxon.cck_mask = 0; 4512 sc->rxon.ofdm_mask = 0x15; 4513 } else if (IEEE80211_IS_CHAN_B(c)) { 4514 sc->rxon.cck_mask = 0x03; 4515 sc->rxon.ofdm_mask = 0; 4516 } else { 4517 /* Assume 802.11b/g. */ 4518 sc->rxon.cck_mask = 0x0f; 4519 sc->rxon.ofdm_mask = 0x15; 4520 } 4521 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4522 4523 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4524 sc->rxon.chan, sc->rxon.flags); 4525 4526 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4527 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4528 __func__); 4529 return error; 4530 } 4531 4532 /* Start periodic calibration timer. */ 4533 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4534 4535 WPI_RXON_UNLOCK(sc); 4536 4537 if (vap->iv_opmode == IEEE80211_M_IBSS || 4538 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4539 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4540 device_printf(sc->sc_dev, 4541 "%s: could not setup beacon, error %d\n", __func__, 4542 error); 4543 return error; 4544 } 4545 } 4546 4547 if (vap->iv_opmode == IEEE80211_M_STA) { 4548 /* Add BSS node. */ 4549 WPI_NT_LOCK(sc); 4550 error = wpi_add_sta_node(sc, ni); 4551 WPI_NT_UNLOCK(sc); 4552 if (error != 0) { 4553 device_printf(sc->sc_dev, 4554 "%s: could not add BSS node, error %d\n", __func__, 4555 error); 4556 return error; 4557 } 4558 } 4559 4560 /* Link LED always on while associated. */ 4561 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 4562 4563 /* Enable power-saving mode if requested by user. 
*/ 4564 if ((vap->iv_flags & IEEE80211_F_PMGTON) && 4565 vap->iv_opmode != IEEE80211_M_IBSS) 4566 (void)wpi_set_pslevel(sc, 0, 3, 1); 4567 4568 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 4569 4570 return 0; 4571 } 4572 4573 static int 4574 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4575 { 4576 const struct ieee80211_cipher *cip = k->wk_cipher; 4577 struct ieee80211vap *vap = ni->ni_vap; 4578 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4579 struct wpi_node *wn = WPI_NODE(ni); 4580 struct wpi_node_info node; 4581 uint16_t kflags; 4582 int error; 4583 4584 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4585 4586 if (wpi_check_node_entry(sc, wn->id) == 0) { 4587 device_printf(sc->sc_dev, "%s: node does not exist\n", 4588 __func__); 4589 return 0; 4590 } 4591 4592 switch (cip->ic_cipher) { 4593 case IEEE80211_CIPHER_AES_CCM: 4594 kflags = WPI_KFLAG_CCMP; 4595 break; 4596 4597 default: 4598 device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, 4599 cip->ic_cipher); 4600 return 0; 4601 } 4602 4603 kflags |= WPI_KFLAG_KID(k->wk_keyix); 4604 if (k->wk_flags & IEEE80211_KEY_GROUP) 4605 kflags |= WPI_KFLAG_MULTICAST; 4606 4607 memset(&node, 0, sizeof node); 4608 node.id = wn->id; 4609 node.control = WPI_NODE_UPDATE; 4610 node.flags = WPI_FLAG_KEY_SET; 4611 node.kflags = htole16(kflags); 4612 memcpy(node.key, k->wk_key, k->wk_keylen); 4613 again: 4614 DPRINTF(sc, WPI_DEBUG_KEY, 4615 "%s: setting %s key id %d for node %d (%s)\n", __func__, 4616 (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix, 4617 node.id, ether_sprintf(ni->ni_macaddr)); 4618 4619 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4620 if (error != 0) { 4621 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4622 error); 4623 return !error; 4624 } 4625 4626 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4627 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4628 kflags |= WPI_KFLAG_MULTICAST; 4629 node.kflags = htole16(kflags); 4630 4631 goto again; 4632 } 4633 4634 return 1; 4635 } 4636 4637 static void 4638 wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4639 { 4640 const struct ieee80211_key *k = arg; 4641 struct ieee80211vap *vap = ni->ni_vap; 4642 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4643 struct wpi_node *wn = WPI_NODE(ni); 4644 int error; 4645 4646 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4647 return; 4648 4649 WPI_NT_LOCK(sc); 4650 error = wpi_load_key(ni, k); 4651 WPI_NT_UNLOCK(sc); 4652 4653 if (error == 0) { 4654 device_printf(sc->sc_dev, "%s: error while setting key\n", 4655 __func__); 4656 } 4657 } 4658 4659 static int 4660 wpi_set_global_keys(struct ieee80211_node *ni) 4661 { 4662 struct ieee80211vap *vap = ni->ni_vap; 4663 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4664 int error = 1; 4665 4666 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4667 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4668 error = wpi_load_key(ni, wk); 4669 4670 return !error; 4671 } 4672 4673 static int 4674 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4675 { 4676 struct ieee80211vap *vap = ni->ni_vap; 4677 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4678 struct wpi_node *wn = WPI_NODE(ni); 4679 struct wpi_node_info node; 4680 uint16_t kflags; 4681 int error; 4682 4683 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4684 4685 if (wpi_check_node_entry(sc, wn->id) == 0) { 4686 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 
4687 return 1; /* Nothing to do. */ 4688 } 4689 4690 kflags = WPI_KFLAG_KID(k->wk_keyix); 4691 if (k->wk_flags & IEEE80211_KEY_GROUP) 4692 kflags |= WPI_KFLAG_MULTICAST; 4693 4694 memset(&node, 0, sizeof node); 4695 node.id = wn->id; 4696 node.control = WPI_NODE_UPDATE; 4697 node.flags = WPI_FLAG_KEY_SET; 4698 node.kflags = htole16(kflags); 4699 again: 4700 DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n", 4701 __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", 4702 k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr)); 4703 4704 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4705 if (error != 0) { 4706 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4707 error); 4708 return !error; 4709 } 4710 4711 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4712 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4713 kflags |= WPI_KFLAG_MULTICAST; 4714 node.kflags = htole16(kflags); 4715 4716 goto again; 4717 } 4718 4719 return 1; 4720 } 4721 4722 static void 4723 wpi_del_key_cb(void *arg, struct ieee80211_node *ni) 4724 { 4725 const struct ieee80211_key *k = arg; 4726 struct ieee80211vap *vap = ni->ni_vap; 4727 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4728 struct wpi_node *wn = WPI_NODE(ni); 4729 int error; 4730 4731 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4732 return; 4733 4734 WPI_NT_LOCK(sc); 4735 error = wpi_del_key(ni, k); 4736 WPI_NT_UNLOCK(sc); 4737 4738 if (error == 0) { 4739 device_printf(sc->sc_dev, "%s: error while deleting key\n", 4740 __func__); 4741 } 4742 } 4743 4744 static int 4745 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, 4746 int set) 4747 { 4748 struct ieee80211com *ic = vap->iv_ic; 4749 struct wpi_softc *sc = ic->ic_ifp->if_softc; 4750 struct wpi_vap *wvp = WPI_VAP(vap); 4751 struct ieee80211_node *ni; 4752 int error, ni_ref = 0; 4753 4754 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4755 4756 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { 4757 /* Not for us. */ 4758 return 1; 4759 } 4760 4761 if (!(k->wk_flags & IEEE80211_KEY_RECV)) { 4762 /* XMIT keys are handled in wpi_tx_data(). */ 4763 return 1; 4764 } 4765 4766 /* Handle group keys. */ 4767 if (&vap->iv_nw_keys[0] <= k && 4768 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4769 WPI_NT_LOCK(sc); 4770 if (set) 4771 wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix); 4772 else 4773 wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix); 4774 WPI_NT_UNLOCK(sc); 4775 4776 if (vap->iv_state == IEEE80211_S_RUN) { 4777 ieee80211_iterate_nodes(&ic->ic_sta, 4778 set ? 
wpi_load_key_cb : wpi_del_key_cb, 4779 __DECONST(void *, k)); 4780 } 4781 4782 return 1; 4783 } 4784 4785 switch (vap->iv_opmode) { 4786 case IEEE80211_M_STA: 4787 ni = vap->iv_bss; 4788 break; 4789 4790 case IEEE80211_M_IBSS: 4791 case IEEE80211_M_AHDEMO: 4792 case IEEE80211_M_HOSTAP: 4793 ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr); 4794 if (ni == NULL) 4795 return 0; /* should not happen */ 4796 4797 ni_ref = 1; 4798 break; 4799 4800 default: 4801 device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__, 4802 vap->iv_opmode); 4803 return 0; 4804 } 4805 4806 WPI_NT_LOCK(sc); 4807 if (set) 4808 error = wpi_load_key(ni, k); 4809 else 4810 error = wpi_del_key(ni, k); 4811 WPI_NT_UNLOCK(sc); 4812 4813 if (ni_ref) 4814 ieee80211_node_decref(ni); 4815 4816 return error; 4817 } 4818 4819 static int 4820 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, 4821 const uint8_t mac[IEEE80211_ADDR_LEN]) 4822 { 4823 return wpi_process_key(vap, k, 1); 4824 } 4825 4826 static int 4827 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) 4828 { 4829 return wpi_process_key(vap, k, 0); 4830 } 4831 4832 /* 4833 * This function is called after the runtime firmware notifies us of its 4834 * readiness (called in a process context). 4835 */ 4836 static int 4837 wpi_post_alive(struct wpi_softc *sc) 4838 { 4839 int ntries, error; 4840 4841 /* Check (again) that the radio is not disabled. */ 4842 if ((error = wpi_nic_lock(sc)) != 0) 4843 return error; 4844 4845 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4846 4847 /* NB: Runtime firmware must be up and running. */ 4848 if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) { 4849 device_printf(sc->sc_dev, 4850 "RF switch: radio disabled (%s)\n", __func__); 4851 wpi_nic_unlock(sc); 4852 return EPERM; /* :-) */ 4853 } 4854 wpi_nic_unlock(sc); 4855 4856 /* Wait for thermal sensor to calibrate. */ 4857 for (ntries = 0; ntries < 1000; ntries++) { 4858 if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0) 4859 break; 4860 DELAY(10); 4861 } 4862 4863 if (ntries == 1000) { 4864 device_printf(sc->sc_dev, 4865 "timeout waiting for thermal sensor calibration\n"); 4866 return ETIMEDOUT; 4867 } 4868 4869 DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp); 4870 return 0; 4871 } 4872 4873 /* 4874 * The firmware boot code is small and is intended to be copied directly into 4875 * the NIC internal memory (no DMA transfer). 4876 */ 4877 static int 4878 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size) 4879 { 4880 int error, ntries; 4881 4882 DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size); 4883 4884 size /= sizeof (uint32_t); 4885 4886 if ((error = wpi_nic_lock(sc)) != 0) 4887 return error; 4888 4889 /* Copy microcode image into NIC memory. */ 4890 wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE, 4891 (const uint32_t *)ucode, size); 4892 4893 wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0); 4894 wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE); 4895 wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size); 4896 4897 /* Start boot load now. */ 4898 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START); 4899 4900 /* Wait for transfer to complete. 
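 * Completion shows up as the DMA channel 6 idle bit in WPI_FH_TX_STATUS;
 * the loop below polls for it for up to roughly 10 ms.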
*/ 4901 for (ntries = 0; ntries < 1000; ntries++) { 4902 uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS); 4903 DPRINTF(sc, WPI_DEBUG_HW, 4904 "firmware status=0x%x, val=0x%x, result=0x%x\n", status, 4905 WPI_FH_TX_STATUS_IDLE(6), 4906 status & WPI_FH_TX_STATUS_IDLE(6)); 4907 if (status & WPI_FH_TX_STATUS_IDLE(6)) { 4908 DPRINTF(sc, WPI_DEBUG_HW, 4909 "Status Match! - ntries = %d\n", ntries); 4910 break; 4911 } 4912 DELAY(10); 4913 } 4914 if (ntries == 1000) { 4915 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4916 __func__); 4917 wpi_nic_unlock(sc); 4918 return ETIMEDOUT; 4919 } 4920 4921 /* Enable boot after power up. */ 4922 wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN); 4923 4924 wpi_nic_unlock(sc); 4925 return 0; 4926 } 4927 4928 static int 4929 wpi_load_firmware(struct wpi_softc *sc) 4930 { 4931 struct wpi_fw_info *fw = &sc->fw; 4932 struct wpi_dma_info *dma = &sc->fw_dma; 4933 int error; 4934 4935 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4936 4937 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 4938 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4939 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4940 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); 4941 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4942 4943 /* Tell adapter where to find initialization sections. */ 4944 if ((error = wpi_nic_lock(sc)) != 0) 4945 return error; 4946 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4947 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4948 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4949 dma->paddr + WPI_FW_DATA_MAXSZ); 4950 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4951 wpi_nic_unlock(sc); 4952 4953 /* Load firmware boot code. */ 4954 error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4955 if (error != 0) { 4956 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4957 __func__); 4958 return error; 4959 } 4960 4961 /* Now press "execute". */ 4962 WPI_WRITE(sc, WPI_RESET, 0); 4963 4964 /* Wait at most one second for first alive notification. */ 4965 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 4966 device_printf(sc->sc_dev, 4967 "%s: timeout waiting for adapter to initialize, error %d\n", 4968 __func__, error); 4969 return error; 4970 } 4971 4972 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4973 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4974 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4975 memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); 4976 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 4977 4978 /* Tell adapter where to find runtime sections. 
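 * As with the init image above, the data section lives at offset 0 of the
 * firmware DMA area and the text section at offset WPI_FW_DATA_MAXSZ;
 * WPI_FW_UPDATED presumably marks the text as an updated (runtime) image
 * for the BSM.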
*/ 4979 if ((error = wpi_nic_lock(sc)) != 0) 4980 return error; 4981 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr); 4982 wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4983 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR, 4984 dma->paddr + WPI_FW_DATA_MAXSZ); 4985 wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, 4986 WPI_FW_UPDATED | fw->main.textsz); 4987 wpi_nic_unlock(sc); 4988 4989 return 0; 4990 } 4991 4992 static int 4993 wpi_read_firmware(struct wpi_softc *sc) 4994 { 4995 const struct firmware *fp; 4996 struct wpi_fw_info *fw = &sc->fw; 4997 const struct wpi_firmware_hdr *hdr; 4998 int error; 4999 5000 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5001 5002 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5003 "Attempting Loading Firmware from %s module\n", WPI_FW_NAME); 5004 5005 WPI_UNLOCK(sc); 5006 fp = firmware_get(WPI_FW_NAME); 5007 WPI_LOCK(sc); 5008 5009 if (fp == NULL) { 5010 device_printf(sc->sc_dev, 5011 "could not load firmware image '%s'\n", WPI_FW_NAME); 5012 return EINVAL; 5013 } 5014 5015 sc->fw_fp = fp; 5016 5017 if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { 5018 device_printf(sc->sc_dev, 5019 "firmware file too short: %zu bytes\n", fp->datasize); 5020 error = EINVAL; 5021 goto fail; 5022 } 5023 5024 fw->size = fp->datasize; 5025 fw->data = (const uint8_t *)fp->data; 5026 5027 /* Extract firmware header information. */ 5028 hdr = (const struct wpi_firmware_hdr *)fw->data; 5029 5030 /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | 5031 |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ 5032 5033 fw->main.textsz = le32toh(hdr->rtextsz); 5034 fw->main.datasz = le32toh(hdr->rdatasz); 5035 fw->init.textsz = le32toh(hdr->itextsz); 5036 fw->init.datasz = le32toh(hdr->idatasz); 5037 fw->boot.textsz = le32toh(hdr->btextsz); 5038 fw->boot.datasz = 0; 5039 5040 /* Sanity-check firmware header. */ 5041 if (fw->main.textsz > WPI_FW_TEXT_MAXSZ || 5042 fw->main.datasz > WPI_FW_DATA_MAXSZ || 5043 fw->init.textsz > WPI_FW_TEXT_MAXSZ || 5044 fw->init.datasz > WPI_FW_DATA_MAXSZ || 5045 fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ || 5046 (fw->boot.textsz & 3) != 0) { 5047 device_printf(sc->sc_dev, "invalid firmware header\n"); 5048 error = EINVAL; 5049 goto fail; 5050 } 5051 5052 /* Check that all firmware sections fit. */ 5053 if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5054 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5055 device_printf(sc->sc_dev, 5056 "firmware file too short: %zu bytes\n", fw->size); 5057 error = EINVAL; 5058 goto fail; 5059 } 5060 5061 /* Get pointers to firmware sections. 
*/ 5062 fw->main.text = (const uint8_t *)(hdr + 1); 5063 fw->main.data = fw->main.text + fw->main.textsz; 5064 fw->init.text = fw->main.data + fw->main.datasz; 5065 fw->init.data = fw->init.text + fw->init.textsz; 5066 fw->boot.text = fw->init.data + fw->init.datasz; 5067 5068 DPRINTF(sc, WPI_DEBUG_FIRMWARE, 5069 "Firmware Version: Major %d, Minor %d, Driver %d, \n" 5070 "runtime (text: %u, data: %u) init (text: %u, data %u) " 5071 "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver), 5072 fw->main.textsz, fw->main.datasz, 5073 fw->init.textsz, fw->init.datasz, fw->boot.textsz); 5074 5075 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text); 5076 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data); 5077 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text); 5078 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data); 5079 DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text); 5080 5081 return 0; 5082 5083 fail: wpi_unload_firmware(sc); 5084 return error; 5085 } 5086 5087 /** 5088 * Free the referenced firmware image 5089 */ 5090 static void 5091 wpi_unload_firmware(struct wpi_softc *sc) 5092 { 5093 if (sc->fw_fp != NULL) { 5094 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5095 sc->fw_fp = NULL; 5096 } 5097 } 5098 5099 static int 5100 wpi_clock_wait(struct wpi_softc *sc) 5101 { 5102 int ntries; 5103 5104 /* Set "initialization complete" bit. */ 5105 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5106 5107 /* Wait for clock stabilization. */ 5108 for (ntries = 0; ntries < 2500; ntries++) { 5109 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY) 5110 return 0; 5111 DELAY(100); 5112 } 5113 device_printf(sc->sc_dev, 5114 "%s: timeout waiting for clock stabilization\n", __func__); 5115 5116 return ETIMEDOUT; 5117 } 5118 5119 static int 5120 wpi_apm_init(struct wpi_softc *sc) 5121 { 5122 uint32_t reg; 5123 int error; 5124 5125 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5126 5127 /* Disable L0s exit timer (NMI bug workaround). */ 5128 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER); 5129 /* Don't wait for ICH L0s (ICH bug workaround). */ 5130 WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX); 5131 5132 /* Set FH wait threshold to max (HW bug under stress workaround). */ 5133 WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000); 5134 5135 /* Retrieve PCIe Active State Power Management (ASPM). */ 5136 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5137 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 5138 if (reg & 0x02) /* L1 Entry enabled. */ 5139 WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5140 else 5141 WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA); 5142 5143 WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT); 5144 5145 /* Wait for clock stabilization before accessing prph. */ 5146 if ((error = wpi_clock_wait(sc)) != 0) 5147 return error; 5148 5149 if ((error = wpi_nic_lock(sc)) != 0) 5150 return error; 5151 /* Cleanup. */ 5152 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400); 5153 wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200); 5154 5155 /* Enable DMA and BSM (Bootstrap State Machine). */ 5156 wpi_prph_write(sc, WPI_APMG_CLK_EN, 5157 WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT); 5158 DELAY(20); 5159 /* Disable L1-Active. 
*/ 5160 wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS); 5161 wpi_nic_unlock(sc); 5162 5163 return 0; 5164 } 5165 5166 static void 5167 wpi_apm_stop_master(struct wpi_softc *sc) 5168 { 5169 int ntries; 5170 5171 /* Stop busmaster DMA activity. */ 5172 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER); 5173 5174 if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) == 5175 WPI_GP_CNTRL_MAC_PS) 5176 return; /* Already asleep. */ 5177 5178 for (ntries = 0; ntries < 100; ntries++) { 5179 if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED) 5180 return; 5181 DELAY(10); 5182 } 5183 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5184 __func__); 5185 } 5186 5187 static void 5188 wpi_apm_stop(struct wpi_softc *sc) 5189 { 5190 wpi_apm_stop_master(sc); 5191 5192 /* Reset the entire device. */ 5193 WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW); 5194 DELAY(10); 5195 /* Clear "initialization complete" bit. */ 5196 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE); 5197 } 5198 5199 static void 5200 wpi_nic_config(struct wpi_softc *sc) 5201 { 5202 uint32_t rev; 5203 5204 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5205 5206 /* voodoo from the Linux "driver".. */ 5207 rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); 5208 if ((rev & 0xc0) == 0x40) 5209 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB); 5210 else if (!(rev & 0x80)) 5211 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM); 5212 5213 if (sc->cap == 0x80) 5214 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC); 5215 5216 if ((sc->rev & 0xf0) == 0xd0) 5217 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5218 else 5219 WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D); 5220 5221 if (sc->type > 1) 5222 WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B); 5223 } 5224 5225 static int 5226 wpi_hw_init(struct wpi_softc *sc) 5227 { 5228 int chnl, ntries, error; 5229 5230 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5231 5232 /* Clear pending interrupts. */ 5233 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5234 5235 if ((error = wpi_apm_init(sc)) != 0) { 5236 device_printf(sc->sc_dev, 5237 "%s: could not power ON adapter, error %d\n", __func__, 5238 error); 5239 return error; 5240 } 5241 5242 /* Select VMAIN power source. */ 5243 if ((error = wpi_nic_lock(sc)) != 0) 5244 return error; 5245 wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK); 5246 wpi_nic_unlock(sc); 5247 /* Spin until VMAIN gets selected. */ 5248 for (ntries = 0; ntries < 5000; ntries++) { 5249 if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN) 5250 break; 5251 DELAY(10); 5252 } 5253 if (ntries == 5000) { 5254 device_printf(sc->sc_dev, "timeout selecting power source\n"); 5255 return ETIMEDOUT; 5256 } 5257 5258 /* Perform adapter initialization. */ 5259 wpi_nic_config(sc); 5260 5261 /* Initialize RX ring. */ 5262 if ((error = wpi_nic_lock(sc)) != 0) 5263 return error; 5264 /* Set physical address of RX ring. */ 5265 WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr); 5266 /* Set physical address of RX read pointer. */ 5267 WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr + 5268 offsetof(struct wpi_shared, next)); 5269 WPI_WRITE(sc, WPI_FH_RX_WPTR, 0); 5270 /* Enable RX. 
*/ 5271 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 5272 WPI_FH_RX_CONFIG_DMA_ENA | 5273 WPI_FH_RX_CONFIG_RDRBD_ENA | 5274 WPI_FH_RX_CONFIG_WRSTATUS_ENA | 5275 WPI_FH_RX_CONFIG_MAXFRAG | 5276 WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) | 5277 WPI_FH_RX_CONFIG_IRQ_DST_HOST | 5278 WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1)); 5279 (void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */ 5280 wpi_nic_unlock(sc); 5281 WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7); 5282 5283 /* Initialize TX rings. */ 5284 if ((error = wpi_nic_lock(sc)) != 0) 5285 return error; 5286 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */ 5287 wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */ 5288 /* Enable all 6 TX rings. */ 5289 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f); 5290 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000); 5291 wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002); 5292 wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4); 5293 wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5); 5294 /* Set physical address of TX rings. */ 5295 WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr); 5296 WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5); 5297 5298 /* Enable all DMA channels. */ 5299 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5300 WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0); 5301 WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0); 5302 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008); 5303 } 5304 wpi_nic_unlock(sc); 5305 (void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */ 5306 5307 /* Clear "radio off" and "commands blocked" bits. */ 5308 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5309 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED); 5310 5311 /* Clear pending interrupts. */ 5312 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5313 /* Enable interrupts. */ 5314 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 5315 5316 /* _Really_ make sure "radio off" bit is cleared! */ 5317 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5318 WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL); 5319 5320 if ((error = wpi_load_firmware(sc)) != 0) { 5321 device_printf(sc->sc_dev, 5322 "%s: could not load firmware, error %d\n", __func__, 5323 error); 5324 return error; 5325 } 5326 /* Wait at most one second for firmware alive notification. */ 5327 if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { 5328 device_printf(sc->sc_dev, 5329 "%s: timeout waiting for adapter to initialize, error %d\n", 5330 __func__, error); 5331 return error; 5332 } 5333 5334 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5335 5336 /* Do post-firmware initialization. */ 5337 return wpi_post_alive(sc); 5338 } 5339 5340 static void 5341 wpi_hw_stop(struct wpi_softc *sc) 5342 { 5343 int chnl, qid, ntries; 5344 5345 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5346 5347 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) 5348 wpi_nic_lock(sc); 5349 5350 WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO); 5351 5352 /* Disable interrupts. */ 5353 WPI_WRITE(sc, WPI_INT_MASK, 0); 5354 WPI_WRITE(sc, WPI_INT, 0xffffffff); 5355 WPI_WRITE(sc, WPI_FH_INT, 0xffffffff); 5356 5357 /* Make sure we no longer hold the NIC lock. */ 5358 wpi_nic_unlock(sc); 5359 5360 if (wpi_nic_lock(sc) == 0) { 5361 /* Stop TX scheduler. */ 5362 wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0); 5363 wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0); 5364 5365 /* Stop all DMA channels. 
*/ 5366 for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) { 5367 WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0); 5368 for (ntries = 0; ntries < 200; ntries++) { 5369 if (WPI_READ(sc, WPI_FH_TX_STATUS) & 5370 WPI_FH_TX_STATUS_IDLE(chnl)) 5371 break; 5372 DELAY(10); 5373 } 5374 } 5375 wpi_nic_unlock(sc); 5376 } 5377 5378 /* Stop RX ring. */ 5379 wpi_reset_rx_ring(sc); 5380 5381 /* Reset all TX rings. */ 5382 for (qid = 0; qid < WPI_NTXQUEUES; qid++) 5383 wpi_reset_tx_ring(sc, &sc->txq[qid]); 5384 5385 if (wpi_nic_lock(sc) == 0) { 5386 wpi_prph_write(sc, WPI_APMG_CLK_DIS, 5387 WPI_APMG_CLK_CTRL_DMA_CLK_RQT); 5388 wpi_nic_unlock(sc); 5389 } 5390 DELAY(5); 5391 /* Power OFF adapter. */ 5392 wpi_apm_stop(sc); 5393 } 5394 5395 static void 5396 wpi_radio_on(void *arg0, int pending) 5397 { 5398 struct wpi_softc *sc = arg0; 5399 struct ifnet *ifp = sc->sc_ifp; 5400 struct ieee80211com *ic = ifp->if_l2com; 5401 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5402 5403 device_printf(sc->sc_dev, "RF switch: radio enabled\n"); 5404 5405 if (vap != NULL) { 5406 wpi_init(sc); 5407 ieee80211_init(vap); 5408 } 5409 5410 if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL) { 5411 WPI_LOCK(sc); 5412 callout_stop(&sc->watchdog_rfkill); 5413 WPI_UNLOCK(sc); 5414 } 5415 } 5416 5417 static void 5418 wpi_radio_off(void *arg0, int pending) 5419 { 5420 struct wpi_softc *sc = arg0; 5421 struct ifnet *ifp = sc->sc_ifp; 5422 struct ieee80211com *ic = ifp->if_l2com; 5423 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5424 5425 device_printf(sc->sc_dev, "RF switch: radio disabled\n"); 5426 5427 wpi_stop(sc); 5428 if (vap != NULL) 5429 ieee80211_stop(vap); 5430 5431 WPI_LOCK(sc); 5432 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc); 5433 WPI_UNLOCK(sc); 5434 } 5435 5436 static void 5437 wpi_init(void *arg) 5438 { 5439 struct wpi_softc *sc = arg; 5440 struct ifnet *ifp = sc->sc_ifp; 5441 struct ieee80211com *ic = ifp->if_l2com; 5442 int error; 5443 5444 WPI_LOCK(sc); 5445 5446 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 5447 5448 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 5449 goto end; 5450 5451 /* Check that the radio is not disabled by hardware switch. */ 5452 if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) { 5453 device_printf(sc->sc_dev, 5454 "RF switch: radio disabled (%s)\n", __func__); 5455 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 5456 sc); 5457 goto end; 5458 } 5459 5460 /* Read firmware images from the filesystem. */ 5461 if ((error = wpi_read_firmware(sc)) != 0) { 5462 device_printf(sc->sc_dev, 5463 "%s: could not read firmware, error %d\n", __func__, 5464 error); 5465 goto fail; 5466 } 5467 5468 /* Initialize hardware and upload firmware. */ 5469 error = wpi_hw_init(sc); 5470 wpi_unload_firmware(sc); 5471 if (error != 0) { 5472 device_printf(sc->sc_dev, 5473 "%s: could not initialize hardware, error %d\n", __func__, 5474 error); 5475 goto fail; 5476 } 5477 5478 /* Configure adapter now that it is ready. 
*/ 5479 sc->txq_active = 1; 5480 if ((error = wpi_config(sc)) != 0) { 5481 device_printf(sc->sc_dev, 5482 "%s: could not configure device, error %d\n", __func__, 5483 error); 5484 goto fail; 5485 } 5486 5487 IF_LOCK(&ifp->if_snd); 5488 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5489 ifp->if_drv_flags |= IFF_DRV_RUNNING; 5490 IF_UNLOCK(&ifp->if_snd); 5491 5492 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 5493 5494 WPI_UNLOCK(sc); 5495 5496 ieee80211_start_all(ic); 5497 5498 return; 5499 5500 fail: wpi_stop_locked(sc); 5501 end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 5502 WPI_UNLOCK(sc); 5503 } 5504 5505 static void 5506 wpi_stop_locked(struct wpi_softc *sc) 5507 { 5508 struct ifnet *ifp = sc->sc_ifp; 5509 5510 WPI_LOCK_ASSERT(sc); 5511 5512 WPI_TXQ_LOCK(sc); 5513 sc->txq_active = 0; 5514 WPI_TXQ_UNLOCK(sc); 5515 5516 WPI_TXQ_STATE_LOCK(sc); 5517 callout_stop(&sc->tx_timeout); 5518 WPI_TXQ_STATE_UNLOCK(sc); 5519 5520 WPI_RXON_LOCK(sc); 5521 callout_stop(&sc->scan_timeout); 5522 callout_stop(&sc->calib_to); 5523 WPI_RXON_UNLOCK(sc); 5524 5525 IF_LOCK(&ifp->if_snd); 5526 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 5527 IF_UNLOCK(&ifp->if_snd); 5528 5529 /* Power OFF hardware. */ 5530 wpi_hw_stop(sc); 5531 } 5532 5533 static void 5534 wpi_stop(struct wpi_softc *sc) 5535 { 5536 WPI_LOCK(sc); 5537 wpi_stop_locked(sc); 5538 WPI_UNLOCK(sc); 5539 } 5540 5541 /* 5542 * Callback from net80211 to start a scan. 5543 */ 5544 static void 5545 wpi_scan_start(struct ieee80211com *ic) 5546 { 5547 struct wpi_softc *sc = ic->ic_ifp->if_softc; 5548 5549 wpi_set_led(sc, WPI_LED_LINK, 20, 2); 5550 } 5551 5552 /* 5553 * Callback from net80211 to terminate a scan. 5554 */ 5555 static void 5556 wpi_scan_end(struct ieee80211com *ic) 5557 { 5558 struct ifnet *ifp = ic->ic_ifp; 5559 struct wpi_softc *sc = ifp->if_softc; 5560 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5561 5562 if (vap->iv_state == IEEE80211_S_RUN) 5563 wpi_set_led(sc, WPI_LED_LINK, 0, 1); 5564 } 5565 5566 /** 5567 * Called by the net80211 framework to indicate to the driver 5568 * that the channel should be changed 5569 */ 5570 static void 5571 wpi_set_channel(struct ieee80211com *ic) 5572 { 5573 const struct ieee80211_channel *c = ic->ic_curchan; 5574 struct ifnet *ifp = ic->ic_ifp; 5575 struct wpi_softc *sc = ifp->if_softc; 5576 int error; 5577 5578 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 5579 5580 WPI_LOCK(sc); 5581 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5582 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5583 WPI_UNLOCK(sc); 5584 WPI_TX_LOCK(sc); 5585 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5586 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5587 WPI_TX_UNLOCK(sc); 5588 5589 /* 5590 * Only need to set the channel in Monitor mode. AP scanning and auth 5591 * are already taken care of by their respective firmware commands. 5592 */ 5593 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5594 WPI_RXON_LOCK(sc); 5595 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 5596 if (IEEE80211_IS_CHAN_2GHZ(c)) { 5597 sc->rxon.flags |= htole32(WPI_RXON_AUTO | 5598 WPI_RXON_24GHZ); 5599 } else { 5600 sc->rxon.flags &= ~htole32(WPI_RXON_AUTO | 5601 WPI_RXON_24GHZ); 5602 } 5603 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) 5604 device_printf(sc->sc_dev, 5605 "%s: error %d setting channel\n", __func__, 5606 error); 5607 WPI_RXON_UNLOCK(sc); 5608 } 5609 } 5610 5611 /** 5612 * Called by net80211 to indicate that we need to scan the current 5613 * channel. 
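 * (The maxdwell hint from net80211 is not used here; the per-channel dwell
 * times are computed in wpi_scan(), which also arms a 5 second scan_timeout
 * callout while a firmware scan is in flight.)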
The channel was previously set via the wpi_set_channel
5614 * callback.
5615 */
5616 static void
5617 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5618 {
5619 struct ieee80211vap *vap = ss->ss_vap;
5620 struct ieee80211com *ic = vap->iv_ic;
5621 struct wpi_softc *sc = ic->ic_ifp->if_softc;
5622 int error;
5623
5624 WPI_RXON_LOCK(sc);
5625 error = wpi_scan(sc, ic->ic_curchan);
5626 WPI_RXON_UNLOCK(sc);
5627 if (error != 0)
5628 ieee80211_cancel_scan(vap);
5629 }
5630
5631 /**
5632 * Called by the net80211 framework to indicate that
5633 * the minimum dwell time has been met and the scan should be terminated.
5634 * We don't actually terminate the scan, as the firmware will notify
5635 * us when it's finished and we have no way to interrupt it.
5636 */
5637 static void
5638 wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5639 {
5640 /* NB: don't try to abort scan; wait for firmware to finish */
5641 }
5642
5643 static void
5644 wpi_hw_reset(void *arg, int pending)
5645 {
5646 struct wpi_softc *sc = arg;
5647 struct ifnet *ifp = sc->sc_ifp;
5648 struct ieee80211com *ic = ifp->if_l2com;
5649 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5650
5651 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5652
5653 if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN))
5654 ieee80211_cancel_scan(vap);
5655
5656 wpi_stop(sc);
5657 if (vap != NULL)
5658 ieee80211_stop(vap);
5659 wpi_init(sc);
5660 if (vap != NULL)
5661 ieee80211_init(vap);
5662 }
5663