1 /*- 2 * Copyright (c) 2007-2009 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Copyright (c) 2008 5 * Benjamin Close <benjsc@FreeBSD.org> 6 * Copyright (c) 2008 Sam Leffler, Errno Consulting 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * Driver for Intel Wireless WiFi Link 4965 and Intel WiFi Link 5000 Series 23 * 802.11 network adapters. 24 */ 25 26 #include <sys/cdefs.h> 27 __FBSDID("$FreeBSD$"); 28 29 #include <sys/param.h> 30 #include <sys/sockio.h> 31 #include <sys/sysctl.h> 32 #include <sys/mbuf.h> 33 #include <sys/kernel.h> 34 #include <sys/socket.h> 35 #include <sys/systm.h> 36 #include <sys/malloc.h> 37 #include <sys/bus.h> 38 #include <sys/rman.h> 39 #include <sys/endian.h> 40 #include <sys/firmware.h> 41 #include <sys/limits.h> 42 #include <sys/module.h> 43 #include <sys/queue.h> 44 #include <sys/taskqueue.h> 45 46 #include <machine/bus.h> 47 #include <machine/resource.h> 48 #include <machine/clock.h> 49 50 #include <dev/pci/pcireg.h> 51 #include <dev/pci/pcivar.h> 52 53 #include <net/bpf.h> 54 #include <net/if.h> 55 #include <net/if_arp.h> 56 #include <net/ethernet.h> 57 #include <net/if_dl.h> 58 #include <net/if_media.h> 59 #include <net/if_types.h> 60 61 #include <netinet/in.h> 62 #include <netinet/in_systm.h> 63 #include <netinet/in_var.h> 64 #include <netinet/if_ether.h> 65 #include <netinet/ip.h> 66 67 #include <net80211/ieee80211_var.h> 68 #include <net80211/ieee80211_amrr.h> 69 #include <net80211/ieee80211_radiotap.h> 70 #include <net80211/ieee80211_regdomain.h> 71 72 #include <dev/iwn/if_iwnreg.h> 73 #include <dev/iwn/if_iwnvar.h> 74 75 static int iwn_probe(device_t); 76 static int iwn_attach(device_t); 77 const struct iwn_hal *iwn_hal_attach(struct iwn_softc *); 78 void iwn_radiotap_attach(struct iwn_softc *); 79 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 80 const char name[IFNAMSIZ], int unit, int opmode, 81 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], 82 const uint8_t mac[IEEE80211_ADDR_LEN]); 83 static void iwn_vap_delete(struct ieee80211vap *); 84 static int iwn_cleanup(device_t); 85 static int iwn_detach(device_t); 86 int iwn_nic_lock(struct iwn_softc *); 87 int iwn_eeprom_lock(struct iwn_softc *); 88 int iwn_init_otprom(struct iwn_softc *); 89 int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 90 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 91 void **, bus_size_t, bus_size_t, int); 92 static void iwn_dma_contig_free(struct iwn_dma_info *); 93 int iwn_alloc_sched(struct iwn_softc *); 94 void iwn_free_sched(struct iwn_softc *); 95 int iwn_alloc_kw(struct iwn_softc *); 96 void iwn_free_kw(struct iwn_softc *); 97 int iwn_alloc_fwmem(struct iwn_softc *); 98 void iwn_free_fwmem(struct iwn_softc *); 99 int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 100 void 
iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 101 void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 102 int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 103 int); 104 void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 105 void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 106 int iwn_read_eeprom(struct iwn_softc *, 107 uint8_t macaddr[IEEE80211_ADDR_LEN]); 108 void iwn4965_read_eeprom(struct iwn_softc *); 109 void iwn4965_print_power_group(struct iwn_softc *, int); 110 void iwn5000_read_eeprom(struct iwn_softc *); 111 static void iwn_read_eeprom_channels(struct iwn_softc *, uint32_t, int); 112 struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 113 const uint8_t mac[IEEE80211_ADDR_LEN]); 114 void iwn_newassoc(struct ieee80211_node *, int); 115 int iwn_media_change(struct ifnet *); 116 int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 117 void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 118 struct iwn_rx_data *); 119 static void iwn_timer_timeout(void *); 120 static void iwn_calib_reset(struct iwn_softc *); 121 void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 122 struct iwn_rx_data *); 123 void iwn5000_rx_calib_results(struct iwn_softc *, 124 struct iwn_rx_desc *, struct iwn_rx_data *); 125 void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 126 struct iwn_rx_data *); 127 void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 128 struct iwn_rx_data *); 129 void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 130 struct iwn_rx_data *); 131 void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 132 uint8_t); 133 void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 134 void iwn_notif_intr(struct iwn_softc *); 135 void iwn_wakeup_intr(struct iwn_softc *); 136 void iwn_rftoggle_intr(struct iwn_softc *); 137 void iwn_fatal_intr(struct iwn_softc *, uint32_t, uint32_t); 138 void iwn_intr(void *); 139 void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 140 uint16_t); 141 void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 142 uint16_t); 143 void iwn5000_reset_sched(struct iwn_softc *, int, int); 144 int iwn_tx_data(struct iwn_softc *, struct mbuf *, 145 struct ieee80211_node *, struct iwn_tx_ring *); 146 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 147 const struct ieee80211_bpf_params *); 148 void iwn_start(struct ifnet *); 149 void iwn_start_locked(struct ifnet *); 150 static void iwn_watchdog(struct iwn_softc *sc); 151 int iwn_ioctl(struct ifnet *, u_long, caddr_t); 152 int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 153 int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 154 int); 155 int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 156 int); 157 int iwn_set_link_quality(struct iwn_softc *, uint8_t, 158 const struct ieee80211_channel *, int); 159 int iwn_add_broadcast_node(struct iwn_softc *, 160 const struct ieee80211_channel *, int); 161 int iwn_wme_update(struct ieee80211com *); 162 void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 163 int iwn_set_critical_temp(struct iwn_softc *); 164 int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 165 void iwn4965_power_calibration(struct iwn_softc *, int); 166 int iwn4965_set_txpower(struct iwn_softc *, 167 struct ieee80211_channel *, int); 168 int iwn5000_set_txpower(struct iwn_softc *, 169 struct ieee80211_channel *, int); 170 int iwn4965_get_rssi(struct iwn_softc *, 
struct iwn_rx_stat *); 171 int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 172 int iwn_get_noise(const struct iwn_rx_general_stats *); 173 int iwn4965_get_temperature(struct iwn_softc *); 174 int iwn5000_get_temperature(struct iwn_softc *); 175 int iwn_init_sensitivity(struct iwn_softc *); 176 void iwn_collect_noise(struct iwn_softc *, 177 const struct iwn_rx_general_stats *); 178 int iwn4965_init_gains(struct iwn_softc *); 179 int iwn5000_init_gains(struct iwn_softc *); 180 int iwn4965_set_gains(struct iwn_softc *); 181 int iwn5000_set_gains(struct iwn_softc *); 182 void iwn_tune_sensitivity(struct iwn_softc *, 183 const struct iwn_rx_stats *); 184 int iwn_send_sensitivity(struct iwn_softc *); 185 int iwn_set_pslevel(struct iwn_softc *, int, int, int); 186 int iwn_config(struct iwn_softc *); 187 int iwn_scan(struct iwn_softc *); 188 int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 189 int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 190 int iwn5000_query_calibration(struct iwn_softc *); 191 int iwn5000_send_calibration(struct iwn_softc *); 192 int iwn4965_post_alive(struct iwn_softc *); 193 int iwn5000_post_alive(struct iwn_softc *); 194 int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 195 int); 196 int iwn4965_load_firmware(struct iwn_softc *); 197 int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 198 const uint8_t *, int); 199 int iwn5000_load_firmware(struct iwn_softc *); 200 int iwn_read_firmware(struct iwn_softc *); 201 void iwn_unload_firmware(struct iwn_softc *); 202 int iwn_clock_wait(struct iwn_softc *); 203 int iwn4965_apm_init(struct iwn_softc *); 204 int iwn5000_apm_init(struct iwn_softc *); 205 void iwn_apm_stop_master(struct iwn_softc *); 206 void iwn_apm_stop(struct iwn_softc *); 207 int iwn4965_nic_config(struct iwn_softc *); 208 int iwn5000_nic_config(struct iwn_softc *); 209 int iwn_hw_prepare(struct iwn_softc *sc); 210 int iwn_hw_init(struct iwn_softc *); 211 void iwn_hw_stop(struct iwn_softc *); 212 void iwn_init_locked(struct iwn_softc *); 213 void iwn_init(void *); 214 void iwn_stop_locked(struct iwn_softc *); 215 void iwn_stop(struct iwn_softc *); 216 static void iwn_scan_start(struct ieee80211com *); 217 static void iwn_scan_end(struct ieee80211com *); 218 static void iwn_set_channel(struct ieee80211com *); 219 static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 220 static void iwn_scan_mindwell(struct ieee80211_scan_state *); 221 static void iwn_hw_reset(void *, int); 222 static void iwn_radio_on(void *, int); 223 static void iwn_radio_off(void *, int); 224 static void iwn_sysctlattach(struct iwn_softc *); 225 static int iwn_shutdown(device_t); 226 static int iwn_suspend(device_t); 227 static int iwn_resume(device_t); 228 229 #define IWN_DEBUG 230 #ifdef IWN_DEBUG 231 enum { 232 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 233 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ 234 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ 235 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ 236 IWN_DEBUG_RESET = 0x00000010, /* reset processing */ 237 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */ 238 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ 239 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ 240 IWN_DEBUG_INTR = 0x00000100, /* ISR */ 241 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ 242 IWN_DEBUG_NODE = 0x00000400, /* node management */ 243 IWN_DEBUG_LED = 0x00000800, /* led management */ 244 IWN_DEBUG_CMD = 
0x00001000, /* cmd submission */ 245 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ 246 IWN_DEBUG_ANY = 0xffffffff 247 }; 248 249 #define DPRINTF(sc, m, fmt, ...) do { \ 250 if (sc->sc_debug & (m)) \ 251 printf(fmt, __VA_ARGS__); \ 252 } while (0) 253 254 static const char *iwn_intr_str(uint8_t); 255 #else 256 #define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0) 257 #endif 258 259 struct iwn_ident { 260 uint16_t vendor; 261 uint16_t device; 262 const char *name; 263 }; 264 265 static const struct iwn_ident iwn_ident_table [] = { 266 { 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" }, 267 { 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" }, 268 { 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" }, 269 { 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" }, 270 { 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" }, 271 { 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" }, 272 { 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" }, 273 { 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" }, 274 { 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" }, 275 { 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" }, 276 { 0x8086, 0x4236, "Intel(R) PRO/Wireless 5350" }, 277 { 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" }, 278 { 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" }, 279 { 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" }, 280 { 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" }, 281 { 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" }, 282 { 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" }, 283 { 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" }, 284 { 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" }, 285 { 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" }, 286 { 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" }, 287 { 0, 0, NULL } 288 }; 289 290 static const struct iwn_hal iwn4965_hal = { 291 iwn4965_load_firmware, 292 iwn4965_read_eeprom, 293 iwn4965_post_alive, 294 iwn4965_apm_init, 295 iwn4965_nic_config, 296 iwn4965_update_sched, 297 iwn4965_get_temperature, 298 iwn4965_get_rssi, 299 iwn4965_set_txpower, 300 iwn4965_init_gains, 301 iwn4965_set_gains, 302 iwn4965_add_node, 303 iwn4965_tx_done, 304 &iwn4965_sensitivity_limits, 305 IWN4965_NTXQUEUES, 306 IWN4965_NDMACHNLS, 307 IWN4965_ID_BROADCAST, 308 IWN4965_RXONSZ, 309 IWN4965_SCHEDSZ, 310 IWN4965_FW_TEXT_MAXSZ, 311 IWN4965_FW_DATA_MAXSZ, 312 IWN4965_FWSZ, 313 IWN4965_SCHED_TXFACT, 314 }; 315 316 static const struct iwn_hal iwn5000_hal = { 317 iwn5000_load_firmware, 318 iwn5000_read_eeprom, 319 iwn5000_post_alive, 320 iwn5000_apm_init, 321 iwn5000_nic_config, 322 iwn5000_update_sched, 323 iwn5000_get_temperature, 324 iwn5000_get_rssi, 325 iwn5000_set_txpower, 326 iwn5000_init_gains, 327 iwn5000_set_gains, 328 iwn5000_add_node, 329 iwn5000_tx_done, 330 &iwn5000_sensitivity_limits, 331 IWN5000_NTXQUEUES, 332 IWN5000_NDMACHNLS, 333 IWN5000_ID_BROADCAST, 334 IWN5000_RXONSZ, 335 IWN5000_SCHEDSZ, 336 IWN5000_FW_TEXT_MAXSZ, 337 IWN5000_FW_DATA_MAXSZ, 338 IWN5000_FWSZ, 339 IWN5000_SCHED_TXFACT, 340 }; 341 342 static int 343 iwn_probe(device_t dev) 344 { 345 const struct iwn_ident *ident; 346 347 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 348 if (pci_get_vendor(dev) == ident->vendor && 349 pci_get_device(dev) == ident->device) { 350 device_set_desc(dev, ident->name); 351 return 0; 352 } 353 } 354 return ENXIO; 355 } 356 357 static int 358 iwn_attach(device_t dev) 359 { 360 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 361 struct ieee80211com *ic; 362 struct ifnet *ifp; 363 const struct iwn_hal *hal; 364 uint32_t tmp; 365 int i, error, result; 366 uint8_t 
macaddr[IEEE80211_ADDR_LEN]; 367 368 sc->sc_dev = dev; 369 370 /* 371 * Get the offset of the PCI Express Capability Structure in PCI 372 * Configuration Space. 373 */ 374 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 375 if (error != 0) { 376 device_printf(dev, "PCIe capability structure not found!\n"); 377 return error; 378 } 379 380 /* Clear device-specific "PCI retry timeout" register (41h). */ 381 pci_write_config(dev, 0x41, 0, 1); 382 383 /* Hardware bug workaround. */ 384 tmp = pci_read_config(dev, PCIR_COMMAND, 1); 385 if (tmp & PCIM_CMD_INTxDIS) { 386 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 387 __func__); 388 tmp &= ~PCIM_CMD_INTxDIS; 389 pci_write_config(dev, PCIR_COMMAND, tmp, 1); 390 } 391 392 /* Enable bus-mastering. */ 393 pci_enable_busmaster(dev); 394 395 sc->mem_rid = PCIR_BAR(0); 396 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 397 RF_ACTIVE); 398 if (sc->mem == NULL ) { 399 device_printf(dev, "could not allocate memory resources\n"); 400 error = ENOMEM; 401 return error; 402 } 403 404 sc->sc_st = rman_get_bustag(sc->mem); 405 sc->sc_sh = rman_get_bushandle(sc->mem); 406 sc->irq_rid = 0; 407 if ((result = pci_msi_count(dev)) == 1 && 408 pci_alloc_msi(dev, &result) == 0) 409 sc->irq_rid = 1; 410 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, 411 RF_ACTIVE | RF_SHAREABLE); 412 if (sc->irq == NULL) { 413 device_printf(dev, "could not allocate interrupt resource\n"); 414 error = ENOMEM; 415 goto fail; 416 } 417 418 IWN_LOCK_INIT(sc); 419 callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0); 420 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc ); 421 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc ); 422 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc ); 423 424 /* Attach Hardware Abstraction Layer. */ 425 hal = iwn_hal_attach(sc); 426 if (hal == NULL) { 427 error = ENXIO; /* XXX: Wrong error code? */ 428 goto fail; 429 } 430 431 error = iwn_hw_prepare(sc); 432 if (error != 0) { 433 device_printf(dev, "hardware not ready, error %d\n", error); 434 goto fail; 435 } 436 437 /* Power ON adapter. */ 438 error = hal->apm_init(sc); 439 if (error != 0) { 440 device_printf(dev, "could not power ON adapter, error %d\n", 441 error); 442 goto fail; 443 } 444 445 /* Allocate DMA memory for firmware transfers. */ 446 error = iwn_alloc_fwmem(sc); 447 if (error != 0) { 448 device_printf(dev, 449 "could not allocate memory for firmware, error %d\n", 450 error); 451 goto fail; 452 } 453 454 /* Allocate "Keep Warm" page. */ 455 error = iwn_alloc_kw(sc); 456 if (error != 0) { 457 device_printf(dev, 458 "could not allocate \"Keep Warm\" page, error %d\n", error); 459 goto fail; 460 } 461 462 /* Allocate TX scheduler "rings". */ 463 error = iwn_alloc_sched(sc); 464 if (error != 0) { 465 device_printf(dev, 466 "could not allocate TX scheduler rings, error %d\n", 467 error); 468 goto fail; 469 } 470 471 /* Allocate TX rings (16 on 4965AGN, 20 on 5000). */ 472 for (i = 0; i < hal->ntxqs; i++) { 473 error = iwn_alloc_tx_ring(sc, &sc->txq[i], i); 474 if (error != 0) { 475 device_printf(dev, 476 "could not allocate Tx ring %d, error %d\n", 477 i, error); 478 goto fail; 479 } 480 } 481 482 /* Allocate RX ring. */ 483 error = iwn_alloc_rx_ring(sc, &sc->rxq); 484 if (error != 0 ){ 485 device_printf(dev, 486 "could not allocate Rx ring, error %d\n", error); 487 goto fail; 488 } 489 490 /* Clear pending interrupts. */ 491 IWN_WRITE(sc, IWN_INT, 0xffffffff); 492 493 /* Initialization firmware has not been loaded yet. 
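	 * While IWN_FLAG_FIRST_BOOT is set, calibration results sent by the
	 * initialization firmware are collected (see iwn5000_rx_calib_results())
	 * before the runtime image takes over.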
*/ 494 sc->sc_flags |= IWN_FLAG_FIRST_BOOT; 495 496 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 497 if (ifp == NULL) { 498 device_printf(dev, "can not allocate ifnet structure\n"); 499 goto fail; 500 } 501 ic = ifp->if_l2com; 502 503 ic->ic_ifp = ifp; 504 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 505 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 506 507 /* Set device capabilities. */ 508 ic->ic_caps = 509 IEEE80211_C_STA /* station mode supported */ 510 | IEEE80211_C_MONITOR /* monitor mode supported */ 511 | IEEE80211_C_TXPMGT /* tx power management */ 512 | IEEE80211_C_SHSLOT /* short slot time supported */ 513 | IEEE80211_C_WPA 514 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 515 #if 0 516 | IEEE80211_C_BGSCAN /* background scanning */ 517 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 518 #endif 519 | IEEE80211_C_WME /* WME */ 520 ; 521 #if 0 522 /* XXX disable until HT channel setup works */ 523 ic->ic_htcaps = 524 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */ 525 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */ 526 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 527 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 528 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */ 529 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 530 /* s/w capabilities */ 531 | IEEE80211_HTC_HT /* HT operation */ 532 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 533 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 534 ; 535 #endif 536 537 /* Read MAC address, channels, etc from EEPROM. */ 538 error = iwn_read_eeprom(sc, macaddr); 539 if (error != 0) { 540 device_printf(dev, "could not read EEPROM, error %d\n", 541 error); 542 goto fail; 543 } 544 545 /* Power OFF adapter. */ 546 iwn_apm_stop(sc); 547 548 device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n", 549 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 550 macaddr, ":"); 551 552 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 553 ifp->if_softc = sc; 554 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 555 ifp->if_init = iwn_init; 556 ifp->if_ioctl = iwn_ioctl; 557 ifp->if_start = iwn_start; 558 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 559 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 560 IFQ_SET_READY(&ifp->if_snd); 561 562 ieee80211_ifattach(ic, macaddr); 563 ic->ic_vap_create = iwn_vap_create; 564 ic->ic_vap_delete = iwn_vap_delete; 565 ic->ic_raw_xmit = iwn_raw_xmit; 566 ic->ic_node_alloc = iwn_node_alloc; 567 ic->ic_newassoc = iwn_newassoc; 568 ic->ic_wme.wme_update = iwn_wme_update; 569 ic->ic_scan_start = iwn_scan_start; 570 ic->ic_scan_end = iwn_scan_end; 571 ic->ic_set_channel = iwn_set_channel; 572 ic->ic_scan_curchan = iwn_scan_curchan; 573 ic->ic_scan_mindwell = iwn_scan_mindwell; 574 575 iwn_radiotap_attach(sc); 576 iwn_sysctlattach(sc); 577 578 /* 579 * Hook our interrupt after all initialization is complete. 
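	 * Hooking it only now ensures a stray interrupt cannot reference
	 * rings or firmware state that has not been set up yet.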
580 */ 581 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 582 NULL, iwn_intr, sc, &sc->sc_ih); 583 if (error != 0) { 584 device_printf(dev, "could not set up interrupt, error %d\n", 585 error); 586 goto fail; 587 } 588 589 ieee80211_announce(ic); 590 return 0; 591 fail: 592 iwn_cleanup(dev); 593 return error; 594 } 595 596 const struct iwn_hal * 597 iwn_hal_attach(struct iwn_softc *sc) 598 { 599 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf; 600 601 switch (sc->hw_type) { 602 case IWN_HW_REV_TYPE_4965: 603 sc->sc_hal = &iwn4965_hal; 604 sc->fwname = "iwn4965fw"; 605 sc->critical_temp = IWN_CTOK(110); 606 sc->txantmsk = IWN_ANT_A | IWN_ANT_B; 607 sc->rxantmsk = IWN_ANT_ABC; 608 sc->ntxchains = 2; 609 sc->nrxchains = 3; 610 break; 611 case IWN_HW_REV_TYPE_5100: 612 sc->sc_hal = &iwn5000_hal; 613 sc->fwname = "iwn5000fw"; 614 sc->critical_temp = 110; 615 sc->txantmsk = IWN_ANT_B; 616 sc->rxantmsk = IWN_ANT_A | IWN_ANT_B; 617 sc->ntxchains = 1; 618 sc->nrxchains = 2; 619 break; 620 case IWN_HW_REV_TYPE_5150: 621 sc->sc_hal = &iwn5000_hal; 622 sc->fwname = "iwn5150fw"; 623 /* NB: critical temperature will be read from EEPROM. */ 624 sc->txantmsk = IWN_ANT_A; 625 sc->rxantmsk = IWN_ANT_A | IWN_ANT_B; 626 sc->ntxchains = 1; 627 sc->nrxchains = 2; 628 break; 629 case IWN_HW_REV_TYPE_5300: 630 case IWN_HW_REV_TYPE_5350: 631 sc->sc_hal = &iwn5000_hal; 632 sc->fwname = "iwn5000fw"; 633 sc->critical_temp = 110; 634 sc->txantmsk = sc->rxantmsk = IWN_ANT_ABC; 635 sc->ntxchains = sc->nrxchains = 3; 636 break; 637 case IWN_HW_REV_TYPE_1000: 638 sc->sc_hal = &iwn5000_hal; 639 sc->fwname = "iwn1000fw"; 640 sc->critical_temp = 110; 641 sc->txantmsk = IWN_ANT_A; 642 sc->rxantmsk = IWN_ANT_A | IWN_ANT_B; 643 sc->ntxchains = 1; 644 sc->nrxchains = 2; 645 break; 646 case IWN_HW_REV_TYPE_6000: 647 sc->sc_hal = &iwn5000_hal; 648 sc->fwname = "iwn6000fw"; 649 sc->critical_temp = 110; 650 sc->txantmsk = IWN_ANT_ABC; 651 sc->rxantmsk = IWN_ANT_ABC; 652 sc->ntxchains = 3; 653 sc->nrxchains = 3; 654 break; 655 case IWN_HW_REV_TYPE_6050: 656 sc->sc_hal = &iwn5000_hal; 657 sc->fwname = "iwn6050fw"; 658 sc->critical_temp = 110; 659 sc->txantmsk = IWN_ANT_ABC; 660 sc->rxantmsk = IWN_ANT_ABC; 661 sc->ntxchains = 3; 662 sc->nrxchains = 3; 663 break; 664 default: 665 device_printf(sc->sc_dev, "adapter type %d not supported\n", 666 sc->hw_type); 667 return NULL; 668 } 669 return sc->sc_hal; 670 } 671 672 /* 673 * Attach the interface to 802.11 radiotap. 674 */ 675 void 676 iwn_radiotap_attach(struct iwn_softc *sc) 677 { 678 struct ifnet *ifp = sc->sc_ifp; 679 struct ieee80211com *ic = ifp->if_l2com; 680 681 ieee80211_radiotap_attach(ic, 682 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 683 IWN_TX_RADIOTAP_PRESENT, 684 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 685 IWN_RX_RADIOTAP_PRESENT); 686 } 687 688 static struct ieee80211vap * 689 iwn_vap_create(struct ieee80211com *ic, 690 const char name[IFNAMSIZ], int unit, int opmode, int flags, 691 const uint8_t bssid[IEEE80211_ADDR_LEN], 692 const uint8_t mac[IEEE80211_ADDR_LEN]) 693 { 694 struct iwn_vap *ivp; 695 struct ieee80211vap *vap; 696 697 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 698 return NULL; 699 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), 700 M_80211_VAP, M_NOWAIT | M_ZERO); 701 if (ivp == NULL) 702 return NULL; 703 vap = &ivp->iv_vap; 704 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); 705 vap->iv_bmissthreshold = 10; /* override default */ 706 /* Override with driver methods. 
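	 * The net80211 handler is saved in the vap private area and chained
	 * to from iwn_newstate() once the firmware work has been done.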
*/ 707 ivp->iv_newstate = vap->iv_newstate; 708 vap->iv_newstate = iwn_newstate; 709 710 ieee80211_amrr_init(&ivp->iv_amrr, vap, 711 IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, 712 IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 713 500 /* ms */); 714 715 /* Complete setup. */ 716 ieee80211_vap_attach(vap, ieee80211_media_change, 717 ieee80211_media_status); 718 ic->ic_opmode = opmode; 719 return vap; 720 } 721 722 static void 723 iwn_vap_delete(struct ieee80211vap *vap) 724 { 725 struct iwn_vap *ivp = IWN_VAP(vap); 726 727 ieee80211_amrr_cleanup(&ivp->iv_amrr); 728 ieee80211_vap_detach(vap); 729 free(ivp, M_80211_VAP); 730 } 731 732 int 733 iwn_cleanup(device_t dev) 734 { 735 struct iwn_softc *sc = device_get_softc(dev); 736 struct ifnet *ifp = sc->sc_ifp; 737 struct ieee80211com *ic; 738 int i; 739 740 if (ifp != NULL) { 741 ic = ifp->if_l2com; 742 743 ieee80211_draintask(ic, &sc->sc_reinit_task); 744 ieee80211_draintask(ic, &sc->sc_radioon_task); 745 ieee80211_draintask(ic, &sc->sc_radiooff_task); 746 747 iwn_stop(sc); 748 callout_drain(&sc->sc_timer_to); 749 ieee80211_ifdetach(ic); 750 } 751 752 iwn_unload_firmware(sc); 753 754 iwn_free_rx_ring(sc, &sc->rxq); 755 756 if (sc->sc_hal != NULL) 757 for (i = 0; i < sc->sc_hal->ntxqs; i++) 758 iwn_free_tx_ring(sc, &sc->txq[i]); 759 760 iwn_free_sched(sc); 761 iwn_free_kw(sc); 762 iwn_free_fwmem(sc); 763 764 if (sc->irq != NULL) { 765 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 766 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); 767 if (sc->irq_rid == 1) 768 pci_release_msi(dev); 769 } 770 771 if (sc->mem != NULL) 772 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); 773 774 if (ifp != NULL) 775 if_free(ifp); 776 777 IWN_LOCK_DESTROY(sc); 778 return 0; 779 } 780 781 static int 782 iwn_detach(device_t dev) 783 { 784 iwn_cleanup(dev); 785 return 0; 786 } 787 788 int 789 iwn_nic_lock(struct iwn_softc *sc) 790 { 791 int ntries; 792 793 /* Request exclusive access to NIC. */ 794 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 795 796 /* Spin until we actually get the lock. 
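	 * Access is granted once the MAC is awake: MAC_ACCESS_ENA set and
	 * SLEEP clear.  Callers bracket their register accesses with this
	 * pair, e.g. as iwn_reset_rx_ring() does:
	 *
	 *	if (iwn_nic_lock(sc) == 0) {
	 *		... NIC/periphery register accesses ...
	 *		iwn_nic_unlock(sc);
	 *	}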
*/ 797 for (ntries = 0; ntries < 1000; ntries++) { 798 if ((IWN_READ(sc, IWN_GP_CNTRL) & 799 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 800 IWN_GP_CNTRL_MAC_ACCESS_ENA) 801 return 0; 802 DELAY(10); 803 } 804 return ETIMEDOUT; 805 } 806 807 static __inline void 808 iwn_nic_unlock(struct iwn_softc *sc) 809 { 810 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 811 } 812 813 static __inline uint32_t 814 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 815 { 816 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 817 return IWN_READ(sc, IWN_PRPH_RDATA); 818 } 819 820 static __inline void 821 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 822 { 823 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 824 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 825 } 826 827 static __inline void 828 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 829 { 830 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 831 } 832 833 static __inline void 834 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 835 { 836 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 837 } 838 839 static __inline void 840 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 841 const uint32_t *data, int count) 842 { 843 for (; count > 0; count--, data++, addr += 4) 844 iwn_prph_write(sc, addr, *data); 845 } 846 847 static __inline uint32_t 848 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 849 { 850 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 851 return IWN_READ(sc, IWN_MEM_RDATA); 852 } 853 854 static __inline void 855 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 856 { 857 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 858 IWN_WRITE(sc, IWN_MEM_WDATA, data); 859 } 860 861 static __inline void 862 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) 863 { 864 uint32_t tmp; 865 866 tmp = iwn_mem_read(sc, addr & ~3); 867 if (addr & 3) 868 tmp = (tmp & 0x0000ffff) | data << 16; 869 else 870 tmp = (tmp & 0xffff0000) | data; 871 iwn_mem_write(sc, addr & ~3, tmp); 872 } 873 874 static __inline void 875 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 876 int count) 877 { 878 for (; count > 0; count--, addr += 4) 879 *data++ = iwn_mem_read(sc, addr); 880 } 881 882 static __inline void 883 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 884 int count) 885 { 886 for (; count > 0; count--, addr += 4) 887 iwn_mem_write(sc, addr, val); 888 } 889 890 int 891 iwn_eeprom_lock(struct iwn_softc *sc) 892 { 893 int i, ntries; 894 895 for (i = 0; i < 100; i++) { 896 /* Request exclusive access to EEPROM. */ 897 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 898 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 899 900 /* Spin until we actually get the lock. */ 901 for (ntries = 0; ntries < 100; ntries++) { 902 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 903 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 904 return 0; 905 DELAY(10); 906 } 907 } 908 return ETIMEDOUT; 909 } 910 911 static __inline void 912 iwn_eeprom_unlock(struct iwn_softc *sc) 913 { 914 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 915 } 916 917 /* 918 * Initialize access by host to One Time Programmable ROM. 919 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 
920 */ 921 int 922 iwn_init_otprom(struct iwn_softc *sc) 923 { 924 int error; 925 926 error = iwn_clock_wait(sc); 927 if (error != 0) 928 return error; 929 930 error = iwn_nic_lock(sc); 931 if (error != 0) 932 return error; 933 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 934 DELAY(5); 935 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 936 iwn_nic_unlock(sc); 937 938 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 939 /* Clear ECC status. */ 940 IWN_SETBITS(sc, IWN_OTP_GP, 941 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 942 943 return 0; 944 } 945 946 int 947 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 948 { 949 uint32_t val, tmp; 950 int ntries; 951 uint8_t *out = data; 952 953 for (; count > 0; count -= 2, addr++) { 954 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 955 for (ntries = 0; ntries < 100; ntries++) { 956 val = IWN_READ(sc, IWN_EEPROM); 957 if (val & IWN_EEPROM_READ_VALID) 958 break; 959 DELAY(5); 960 } 961 if (ntries == 100) { 962 device_printf(sc->sc_dev, 963 "timeout reading ROM at 0x%x\n", addr); 964 return ETIMEDOUT; 965 } 966 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 967 /* OTPROM, check for ECC errors. */ 968 tmp = IWN_READ(sc, IWN_OTP_GP); 969 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 970 device_printf(sc->sc_dev, 971 "OTPROM ECC error at 0x%x\n", addr); 972 return EIO; 973 } 974 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 975 /* Correctable ECC error, clear bit. */ 976 IWN_SETBITS(sc, IWN_OTP_GP, 977 IWN_OTP_GP_ECC_CORR_STTS); 978 } 979 } 980 *out++ = val >> 16; 981 if (count > 1) 982 *out++ = val >> 24; 983 } 984 return 0; 985 } 986 987 static void 988 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 989 { 990 if (error != 0) 991 return; 992 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 993 *(bus_addr_t *)arg = segs[0].ds_addr; 994 } 995 996 static int 997 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 998 void **kvap, bus_size_t size, bus_size_t alignment, int flags) 999 { 1000 int error; 1001 1002 dma->size = size; 1003 dma->tag = NULL; 1004 1005 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1006 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1007 1, size, flags, NULL, NULL, &dma->tag); 1008 if (error != 0) { 1009 device_printf(sc->sc_dev, 1010 "%s: bus_dma_tag_create failed, error %d\n", 1011 __func__, error); 1012 goto fail; 1013 } 1014 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1015 flags | BUS_DMA_ZERO, &dma->map); 1016 if (error != 0) { 1017 device_printf(sc->sc_dev, 1018 "%s: bus_dmamem_alloc failed, error %d\n", 1019 __func__, error); 1020 goto fail; 1021 } 1022 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, 1023 size, iwn_dma_map_addr, &dma->paddr, flags); 1024 if (error != 0) { 1025 device_printf(sc->sc_dev, 1026 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1027 goto fail; 1028 } 1029 1030 if (kvap != NULL) 1031 *kvap = dma->vaddr; 1032 return 0; 1033 fail: 1034 iwn_dma_contig_free(dma); 1035 return error; 1036 } 1037 1038 static void 1039 iwn_dma_contig_free(struct iwn_dma_info *dma) 1040 { 1041 if (dma->tag != NULL) { 1042 if (dma->map != NULL) { 1043 if (dma->paddr == 0) { 1044 bus_dmamap_sync(dma->tag, dma->map, 1045 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1046 bus_dmamap_unload(dma->tag, dma->map); 1047 } 1048 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map); 1049 } 1050 bus_dma_tag_destroy(dma->tag); 1051 } 1052 } 1053 1054 int 1055 
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->sched_dma,
	    (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT);
}

void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096,
	    BUS_DMA_NOWAIT);
}

void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL,
	    sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT);
}

void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
	    (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate Rx ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Create the DMA tag used for the RX buffer maps below.
	 * XXX: this overwrites the tag set up by iwn_dma_contig_alloc()
	 * for the descriptors just above.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1,
	    MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->desc_dma.tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bus_dma_tag_create failed, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
	    (void **)&ring->stat, sizeof (struct iwn_rx_status),
	    16, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate Rx status DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];
		bus_addr_t paddr;

		error = bus_dmamap_create(ring->desc_dma.tag, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: bus_dmamap_create failed, error %d\n",
			    __func__, error);
			goto fail;
		}

		data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (data->m == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not allocate rx mbuf\n", __func__);
			error = ENOMEM;
			goto fail;
		}

		/* Map page.
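		 * RX buffers are MJUMPAGESIZE clusters; the firmware is only
		 * given the physical address right-shifted by 8, so the
		 * buffer must be at least 256-byte aligned.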
*/ 1172 error = bus_dmamap_load(ring->desc_dma.tag, data->map, 1173 mtod(data->m, caddr_t), MJUMPAGESIZE, 1174 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1175 if (error != 0 && error != EFBIG) { 1176 device_printf(sc->sc_dev, 1177 "%s: bus_dmamap_load failed, error %d\n", 1178 __func__, error); 1179 m_freem(data->m); 1180 error = ENOMEM; /* XXX unique code */ 1181 goto fail; 1182 } 1183 1184 /* Set physical address of RX buffer (256-byte aligned). */ 1185 ring->desc[i] = htole32(paddr >> 8); 1186 } 1187 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1188 BUS_DMASYNC_PREWRITE); 1189 return 0; 1190 fail: 1191 iwn_free_rx_ring(sc, ring); 1192 return error; 1193 } 1194 1195 void 1196 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1197 { 1198 int ntries; 1199 1200 if (iwn_nic_lock(sc) == 0) { 1201 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1202 for (ntries = 0; ntries < 1000; ntries++) { 1203 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1204 IWN_FH_RX_STATUS_IDLE) 1205 break; 1206 DELAY(10); 1207 } 1208 iwn_nic_unlock(sc); 1209 #ifdef IWN_DEBUG 1210 if (ntries == 1000) 1211 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 1212 "timeout resetting Rx ring"); 1213 #endif 1214 } 1215 ring->cur = 0; 1216 sc->last_rx_valid = 0; 1217 } 1218 1219 void 1220 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1221 { 1222 int i; 1223 1224 iwn_dma_contig_free(&ring->desc_dma); 1225 iwn_dma_contig_free(&ring->stat_dma); 1226 1227 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1228 struct iwn_rx_data *data = &ring->data[i]; 1229 1230 if (data->m != NULL) { 1231 bus_dmamap_sync(ring->desc_dma.tag, data->map, 1232 BUS_DMASYNC_POSTREAD); 1233 bus_dmamap_unload(ring->desc_dma.tag, data->map); 1234 m_freem(data->m); 1235 } 1236 } 1237 } 1238 1239 int 1240 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1241 { 1242 bus_size_t size; 1243 bus_addr_t paddr; 1244 int i, error; 1245 1246 ring->qid = qid; 1247 ring->queued = 0; 1248 ring->cur = 0; 1249 1250 /* Allocate TX descriptors (256-byte aligned.) */ 1251 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc); 1252 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, 1253 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT); 1254 if (error != 0) { 1255 device_printf(sc->sc_dev, 1256 "%s: could not allocate TX ring DMA memory, error %d\n", 1257 __func__, error); 1258 goto fail; 1259 } 1260 1261 /* 1262 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1263 * to allocate commands space for other rings. 
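	 * (The TX descriptors themselves were still allocated above for
	 * every ring; only the command buffers and the per-buffer DMA maps
	 * are skipped for qid > 4.)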
1264 */ 1265 if (qid > 4) 1266 return 0; 1267 1268 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd); 1269 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, 1270 (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT); 1271 if (error != 0) { 1272 device_printf(sc->sc_dev, 1273 "%s: could not allocate TX cmd DMA memory, error %d\n", 1274 __func__, error); 1275 goto fail; 1276 } 1277 1278 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1279 BUS_SPACE_MAXADDR_32BIT, 1280 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1, 1281 MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->desc_dma.tag); 1282 if (error != 0) { 1283 device_printf(sc->sc_dev, 1284 "%s: bus_dma_tag_create_failed, error %d\n", 1285 __func__, error); 1286 goto fail; 1287 } 1288 1289 paddr = ring->cmd_dma.paddr; 1290 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1291 struct iwn_tx_data *data = &ring->data[i]; 1292 1293 data->cmd_paddr = paddr; 1294 data->scratch_paddr = paddr + 12; 1295 paddr += sizeof (struct iwn_tx_cmd); 1296 1297 error = bus_dmamap_create(ring->desc_dma.tag, 0, &data->map); 1298 if (error != 0) { 1299 device_printf(sc->sc_dev, 1300 "%s: bus_dmamap_create failed, error %d\n", 1301 __func__, error); 1302 goto fail; 1303 } 1304 } 1305 return 0; 1306 fail: 1307 iwn_free_tx_ring(sc, ring); 1308 return error; 1309 } 1310 1311 void 1312 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1313 { 1314 int i; 1315 1316 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1317 struct iwn_tx_data *data = &ring->data[i]; 1318 1319 if (data->m != NULL) { 1320 bus_dmamap_sync(ring->desc_dma.tag, data->map, 1321 BUS_DMASYNC_POSTWRITE); 1322 bus_dmamap_unload(ring->desc_dma.tag, data->map); 1323 m_freem(data->m); 1324 data->m = NULL; 1325 } 1326 } 1327 /* Clear TX descriptors. */ 1328 memset(ring->desc, 0, ring->desc_dma.size); 1329 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1330 BUS_DMASYNC_PREWRITE); 1331 sc->qfullmsk &= ~(1 << ring->qid); 1332 ring->queued = 0; 1333 ring->cur = 0; 1334 } 1335 1336 void 1337 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1338 { 1339 int i; 1340 1341 iwn_dma_contig_free(&ring->desc_dma); 1342 iwn_dma_contig_free(&ring->cmd_dma); 1343 1344 if (ring->data != NULL) { 1345 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1346 struct iwn_tx_data *data = &ring->data[i]; 1347 1348 if (data->m != NULL) { 1349 bus_dmamap_sync(ring->desc_dma.tag, data->map, 1350 BUS_DMASYNC_POSTWRITE); 1351 bus_dmamap_unload(ring->desc_dma.tag, 1352 data->map); 1353 m_freem(data->m); 1354 } 1355 } 1356 } 1357 } 1358 1359 int 1360 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1361 { 1362 const struct iwn_hal *hal = sc->sc_hal; 1363 int error; 1364 uint16_t val; 1365 1366 /* Check whether adapter has an EEPROM or an OTPROM. */ 1367 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1368 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1369 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1370 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", 1371 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? 
"OTPROM" : "EEPROM"); 1372 1373 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1374 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); 1375 return EIO; 1376 } 1377 error = iwn_eeprom_lock(sc); 1378 if (error != 0) { 1379 device_printf(sc->sc_dev, 1380 "%s: could not lock ROM, error %d\n", 1381 __func__, error); 1382 return error; 1383 } 1384 1385 if ((sc->sc_flags & IWN_FLAG_HAS_OTPROM) && 1386 ((error = iwn_init_otprom(sc)) != 0)) { 1387 device_printf(sc->sc_dev, 1388 "%s: could not initialize OTPROM, error %d\n", 1389 __func__, error); 1390 return error; 1391 } 1392 1393 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1394 sc->rfcfg = le16toh(val); 1395 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); 1396 1397 /* Read MAC address. */ 1398 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); 1399 1400 /* Read adapter-specific information from EEPROM. */ 1401 hal->read_eeprom(sc); 1402 1403 iwn_eeprom_unlock(sc); 1404 return 0; 1405 } 1406 1407 void 1408 iwn4965_read_eeprom(struct iwn_softc *sc) 1409 { 1410 int i; 1411 uint16_t val; 1412 1413 /* Read regulatory domain (4 ASCII characters.) */ 1414 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1415 1416 /* Read the list of authorized channels. */ 1417 for (i = 0; i < 7; i++) 1418 iwn_read_eeprom_channels(sc, iwn4965_regulatory_bands[i], i); 1419 1420 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1421 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1422 sc->maxpwr2GHz = val & 0xff; 1423 sc->maxpwr5GHz = val >> 8; 1424 /* Check that EEPROM values are within valid range. */ 1425 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1426 sc->maxpwr5GHz = 38; 1427 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1428 sc->maxpwr2GHz = 38; 1429 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", 1430 sc->maxpwr2GHz, sc->maxpwr5GHz); 1431 1432 /* Read samples for each TX power group. */ 1433 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1434 sizeof sc->bands); 1435 1436 /* Read voltage at which samples were taken. */ 1437 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1438 sc->eeprom_voltage = (int16_t)le16toh(val); 1439 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", 1440 sc->eeprom_voltage); 1441 1442 #ifdef IWN_DEBUG 1443 /* Print samples. 
*/
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif
}

#ifdef IWN_DEBUG
void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	printf("===band %d===\n", i);
	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	printf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	printf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	int32_t temp, volt, delta;
	uint32_t addr, base;
	int i;
	uint16_t val;

	/* Read regulatory domain (4 ASCII characters.) */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels. */
	for (i = 0; i < 7; i++) {
		addr = base + iwn5000_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, addr, i);
	}

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute critical temperature (in Kelvin.) */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		delta = temp - (volt / -5);
		sc->critical_temp = (IWN_CTOK(110) - delta) * -5;
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d delta=%dK\n",
		    temp, volt, delta);
	} else {
		/* Read crystal calibration.
*/ 1520 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 1521 &sc->eeprom_crystal, sizeof (uint32_t)); 1522 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n", 1523 le32toh(sc->eeprom_crystal)); 1524 } 1525 } 1526 1527 static void 1528 iwn_read_eeprom_band(struct iwn_softc *sc, const struct iwn_chan_band *band, 1529 uint32_t flags, uint32_t addr) 1530 { 1531 struct ifnet *ifp = sc->sc_ifp; 1532 struct ieee80211com *ic = ifp->if_l2com; 1533 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND]; 1534 struct ieee80211_channel *c; 1535 int i, chan, nflags; 1536 1537 iwn_read_prom_data(sc, addr, channels, 1538 band->nchan * sizeof (struct iwn_eeprom_chan)); 1539 1540 for (i = 0; i < band->nchan; i++) { 1541 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 1542 DPRINTF(sc, IWN_DEBUG_RESET, 1543 "skip chan %d flags 0x%x maxpwr %d\n", 1544 band->chan[i], channels[i].flags, 1545 channels[i].maxpwr); 1546 continue; 1547 } 1548 chan = band->chan[i]; 1549 1550 /* Translate EEPROM flags to net80211 */ 1551 nflags = 0; 1552 if ((channels[i].flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 1553 nflags |= IEEE80211_CHAN_PASSIVE; 1554 if ((channels[i].flags & IWN_EEPROM_CHAN_IBSS) == 0) 1555 nflags |= IEEE80211_CHAN_NOADHOC; 1556 if (channels[i].flags & IWN_EEPROM_CHAN_RADAR) { 1557 nflags |= IEEE80211_CHAN_DFS; 1558 /* XXX apparently IBSS may still be marked */ 1559 nflags |= IEEE80211_CHAN_NOADHOC; 1560 } 1561 1562 DPRINTF(sc, IWN_DEBUG_RESET, 1563 "add chan %d flags 0x%x maxpwr %d\n", 1564 chan, channels[i].flags, channels[i].maxpwr); 1565 1566 c = &ic->ic_channels[ic->ic_nchans++]; 1567 c->ic_ieee = chan; 1568 c->ic_freq = ieee80211_ieee2mhz(chan, flags); 1569 c->ic_maxregpower = channels[i].maxpwr; 1570 c->ic_maxpower = 2*c->ic_maxregpower; 1571 if (flags & IEEE80211_CHAN_2GHZ) { 1572 /* G =>'s B is supported */ 1573 c->ic_flags = IEEE80211_CHAN_B | nflags; 1574 1575 c = &ic->ic_channels[ic->ic_nchans++]; 1576 c[0] = c[-1]; 1577 c->ic_flags = IEEE80211_CHAN_G | nflags; 1578 } else { /* 5GHz band */ 1579 c->ic_flags = IEEE80211_CHAN_A | nflags; 1580 sc->sc_flags |= IWN_FLAG_HAS_5GHZ; 1581 } 1582 /* XXX no constraints on using HT20 */ 1583 /* add HT20, HT40 added separately */ 1584 c = &ic->ic_channels[ic->ic_nchans++]; 1585 c[0] = c[-1]; 1586 c->ic_flags |= IEEE80211_CHAN_HT20; 1587 /* XXX NARROW =>'s 1/2 and 1/4 width? */ 1588 } 1589 } 1590 1591 static void 1592 iwn_read_eeprom_ht40(struct iwn_softc *sc, const struct iwn_chan_band *band, 1593 uint32_t flags, uint32_t addr) 1594 { 1595 struct ifnet *ifp = sc->sc_ifp; 1596 struct ieee80211com *ic = ifp->if_l2com; 1597 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND]; 1598 struct ieee80211_channel *c, *cent, *extc; 1599 int i; 1600 1601 iwn_read_prom_data(sc, addr, channels, 1602 band->nchan * sizeof (struct iwn_eeprom_chan)); 1603 1604 for (i = 0; i < band->nchan; i++) { 1605 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) || 1606 !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) { 1607 DPRINTF(sc, IWN_DEBUG_RESET, 1608 "skip chan %d flags 0x%x maxpwr %d\n", 1609 band->chan[i], channels[i].flags, 1610 channels[i].maxpwr); 1611 continue; 1612 } 1613 /* 1614 * Each entry defines an HT40 channel pair; find the 1615 * center channel, then the extension channel above. 
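		 * Both directions are then published: a copy of the lower
		 * channel marked HT40U (extension 20 MHz above) and a copy
		 * of the upper channel marked HT40D (extension 20 MHz below).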
1616 */ 1617 cent = ieee80211_find_channel_byieee(ic, band->chan[i], 1618 flags & ~IEEE80211_CHAN_HT); 1619 if (cent == NULL) { /* XXX shouldn't happen */ 1620 device_printf(sc->sc_dev, 1621 "%s: no entry for channel %d\n", 1622 __func__, band->chan[i]); 1623 continue; 1624 } 1625 extc = ieee80211_find_channel(ic, cent->ic_freq+20, 1626 flags & ~IEEE80211_CHAN_HT); 1627 if (extc == NULL) { 1628 DPRINTF(sc, IWN_DEBUG_RESET, 1629 "skip chan %d, extension channel not found\n", 1630 band->chan[i]); 1631 continue; 1632 } 1633 1634 DPRINTF(sc, IWN_DEBUG_RESET, 1635 "add ht40 chan %d flags 0x%x maxpwr %d\n", 1636 band->chan[i], channels[i].flags, channels[i].maxpwr); 1637 1638 c = &ic->ic_channels[ic->ic_nchans++]; 1639 c[0] = cent[0]; 1640 c->ic_extieee = extc->ic_ieee; 1641 c->ic_flags &= ~IEEE80211_CHAN_HT; 1642 c->ic_flags |= IEEE80211_CHAN_HT40U; 1643 c = &ic->ic_channels[ic->ic_nchans++]; 1644 c[0] = extc[0]; 1645 c->ic_extieee = cent->ic_ieee; 1646 c->ic_flags &= ~IEEE80211_CHAN_HT; 1647 c->ic_flags |= IEEE80211_CHAN_HT40D; 1648 } 1649 } 1650 1651 static void 1652 iwn_read_eeprom_channels(struct iwn_softc *sc, uint32_t addr, int n) 1653 { 1654 struct ifnet *ifp = sc->sc_ifp; 1655 struct ieee80211com *ic = ifp->if_l2com; 1656 static const uint32_t iwnband_flags[] = { 1657 IEEE80211_CHAN_G, 1658 IEEE80211_CHAN_A, 1659 IEEE80211_CHAN_A, 1660 IEEE80211_CHAN_A, 1661 IEEE80211_CHAN_A, 1662 IEEE80211_CHAN_G | IEEE80211_CHAN_HT40, 1663 IEEE80211_CHAN_A | IEEE80211_CHAN_HT40 1664 }; 1665 1666 if (n < 5) 1667 iwn_read_eeprom_band(sc, &iwn_bands[n], iwnband_flags[n], addr); 1668 else 1669 iwn_read_eeprom_ht40(sc, &iwn_bands[n], iwnband_flags[n], addr); 1670 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1671 } 1672 1673 struct ieee80211_node * 1674 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1675 { 1676 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 1677 } 1678 1679 void 1680 iwn_newassoc(struct ieee80211_node *ni, int isnew) 1681 { 1682 struct ieee80211vap *vap = ni->ni_vap; 1683 1684 ieee80211_amrr_node_init(&IWN_VAP(vap)->iv_amrr, 1685 &IWN_NODE(ni)->amn, ni); 1686 } 1687 1688 int 1689 iwn_media_change(struct ifnet *ifp) 1690 { 1691 int error = ieee80211_media_change(ifp); 1692 /* NB: only the fixed rate can change and that doesn't need a reset */ 1693 return (error == ENETRESET ? 0 : error); 1694 } 1695 1696 int 1697 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1698 { 1699 struct iwn_vap *ivp = IWN_VAP(vap); 1700 struct ieee80211com *ic = vap->iv_ic; 1701 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1702 int error; 1703 1704 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1705 ieee80211_state_name[vap->iv_state], 1706 ieee80211_state_name[nstate]); 1707 1708 IEEE80211_UNLOCK(ic); 1709 IWN_LOCK(sc); 1710 callout_stop(&sc->sc_timer_to); 1711 1712 if (nstate == IEEE80211_S_AUTH && vap->iv_state != IEEE80211_S_AUTH) { 1713 /* !AUTH -> AUTH requires adapter config */ 1714 /* Reset state to handle reassociations correctly. */ 1715 sc->rxon.associd = 0; 1716 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1717 iwn_calib_reset(sc); 1718 error = iwn_auth(sc, vap); 1719 } 1720 if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { 1721 /* 1722 * !RUN -> RUN requires setting the association id 1723 * which is done with a firmware cmd. We also defer 1724 * starting the timers until that work is done. 
1725 */ 1726 error = iwn_run(sc, vap); 1727 } 1728 if (nstate == IEEE80211_S_RUN) { 1729 /* 1730 * RUN -> RUN transition; just restart the timers. 1731 */ 1732 iwn_calib_reset(sc); 1733 } 1734 IWN_UNLOCK(sc); 1735 IEEE80211_LOCK(ic); 1736 return ivp->iv_newstate(vap, nstate, arg); 1737 } 1738 1739 /* 1740 * Process an RX_PHY firmware notification. This is usually immediately 1741 * followed by an MPDU_RX_DONE notification. 1742 */ 1743 void 1744 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1745 struct iwn_rx_data *data) 1746 { 1747 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 1748 1749 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 1750 1751 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 1752 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 1753 sc->last_rx_valid = 1; 1754 } 1755 1756 static void 1757 iwn_timer_timeout(void *arg) 1758 { 1759 struct iwn_softc *sc = arg; 1760 1761 IWN_LOCK_ASSERT(sc); 1762 1763 if (sc->calib_cnt && --sc->calib_cnt == 0) { 1764 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 1765 "send statistics request"); 1766 (void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, NULL, 0, 1); 1767 sc->calib_cnt = 60; /* do calibration every 60s */ 1768 } 1769 iwn_watchdog(sc); /* NB: piggyback tx watchdog */ 1770 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 1771 } 1772 1773 static void 1774 iwn_calib_reset(struct iwn_softc *sc) 1775 { 1776 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 1777 sc->calib_cnt = 60; /* do calibration every 60s */ 1778 } 1779 1780 static __inline int 1781 maprate(int iwnrate) 1782 { 1783 switch (iwnrate) { 1784 /* CCK rates */ 1785 case 10: return 2; 1786 case 20: return 4; 1787 case 55: return 11; 1788 case 110: return 22; 1789 /* OFDM rates */ 1790 case 0xd: return 12; 1791 case 0xf: return 18; 1792 case 0x5: return 24; 1793 case 0x7: return 36; 1794 case 0x9: return 48; 1795 case 0xb: return 72; 1796 case 0x1: return 96; 1797 case 0x3: return 108; 1798 /* XXX MCS */ 1799 } 1800 /* unknown rate: should not happen */ 1801 return 0; 1802 } 1803 1804 /* 1805 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 1806 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 1807 */ 1808 void 1809 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1810 struct iwn_rx_data *data) 1811 { 1812 const struct iwn_hal *hal = sc->sc_hal; 1813 struct ifnet *ifp = sc->sc_ifp; 1814 struct ieee80211com *ic = ifp->if_l2com; 1815 struct iwn_rx_ring *ring = &sc->rxq; 1816 struct ieee80211_frame *wh; 1817 struct ieee80211_node *ni; 1818 struct mbuf *m, *m1; 1819 struct iwn_rx_stat *stat; 1820 caddr_t head; 1821 bus_addr_t paddr; 1822 uint32_t flags; 1823 int error, len, rssi, nf; 1824 1825 if (desc->type == IWN_MPDU_RX_DONE) { 1826 /* Check for prior RX_PHY notification. 
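		 * MPDU_RX_DONE carries no PHY statistics of its own; they
		 * were saved from the preceding RX_PHY notification in
		 * sc->last_rx_stat.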
*/ 1827 if (!sc->last_rx_valid) { 1828 DPRINTF(sc, IWN_DEBUG_ANY, 1829 "%s: missing AMPDU_RX_START\n", __func__); 1830 ifp->if_ierrors++; 1831 return; 1832 } 1833 sc->last_rx_valid = 0; 1834 stat = &sc->last_rx_stat; 1835 } else 1836 stat = (struct iwn_rx_stat *)(desc + 1); 1837 1838 bus_dmamap_sync(ring->desc_dma.tag, data->map, BUS_DMASYNC_POSTREAD); 1839 1840 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 1841 device_printf(sc->sc_dev, 1842 "%s: invalid rx statistic header, len %d\n", 1843 __func__, stat->cfg_phy_len); 1844 ifp->if_ierrors++; 1845 return; 1846 } 1847 if (desc->type == IWN_MPDU_RX_DONE) { 1848 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 1849 head = (caddr_t)(mpdu + 1); 1850 len = le16toh(mpdu->len); 1851 } else { 1852 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 1853 len = le16toh(stat->len); 1854 } 1855 1856 flags = le32toh(*(uint32_t *)(head + len)); 1857 1858 /* Discard frames with a bad FCS early. */ 1859 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 1860 DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n", 1861 __func__, flags); 1862 ifp->if_ierrors++; 1863 return; 1864 } 1865 /* Discard frames that are too short. */ 1866 if (len < sizeof (*wh)) { 1867 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 1868 __func__, len); 1869 ifp->if_ierrors++; 1870 return; 1871 } 1872 1873 /* XXX don't need mbuf, just dma buffer */ 1874 m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1875 if (m1 == NULL) { 1876 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 1877 __func__); 1878 ifp->if_ierrors++; 1879 return; 1880 } 1881 error = bus_dmamap_load(ring->desc_dma.tag, data->map, 1882 mtod(m1, caddr_t), MJUMPAGESIZE, 1883 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1884 if (error != 0 && error != EFBIG) { 1885 device_printf(sc->sc_dev, 1886 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1887 m_freem(m1); 1888 ifp->if_ierrors++; 1889 return; 1890 } 1891 1892 m = data->m; 1893 data->m = m1; 1894 /* Update RX descriptor. */ 1895 ring->desc[ring->cur] = htole32(paddr >> 8); 1896 bus_dmamap_sync(ring->desc_dma.tag, data->map, BUS_DMASYNC_PREWRITE); 1897 1898 /* Finalize mbuf. */ 1899 m->m_pkthdr.rcvif = ifp; 1900 m->m_data = head; 1901 m->m_pkthdr.len = m->m_len = len; 1902 1903 rssi = hal->get_rssi(sc, stat); 1904 1905 /* Grab a reference to the source node. */ 1906 wh = mtod(m, struct ieee80211_frame *); 1907 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 1908 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 1909 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95; 1910 1911 if (ieee80211_radiotap_active(ic)) { 1912 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 1913 1914 tap->wr_tsft = htole64(stat->tstamp); 1915 tap->wr_flags = 0; 1916 if (stat->flags & htole16(IWN_RXON_SHPREAMBLE)) 1917 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 1918 tap->wr_rate = maprate(stat->rate); 1919 tap->wr_dbm_antsignal = rssi; 1920 tap->wr_dbm_antnoise = nf; 1921 } 1922 1923 IWN_UNLOCK(sc); 1924 1925 /* Send the frame to the 802.11 layer. */ 1926 if (ni != NULL) { 1927 (void) ieee80211_input(ni, m, rssi - nf, nf); 1928 /* Node is no longer needed. */ 1929 ieee80211_free_node(ni); 1930 } else 1931 (void) ieee80211_input_all(ic, m, rssi - nf, nf); 1932 1933 IWN_LOCK(sc); 1934 } 1935 1936 /* 1937 * Process a CALIBRATION_RESULT notification sent by the initialization 1938 * firmware on response to a CMD_CALIB_CONFIG command (5000 only.) 
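 * The results are stashed in sc->calibcmd[] so that they can be sent to
 * the runtime firmware later (see iwn5000_send_calibration()).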
1939 */ 1940 void 1941 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1942 struct iwn_rx_data *data) 1943 { 1944 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 1945 int len, idx = -1; 1946 1947 /* Runtime firmware should not send such a notification. */ 1948 if (!(sc->sc_flags & IWN_FLAG_FIRST_BOOT)) 1949 return; 1950 1951 len = (le32toh(desc->len) & 0x3fff) - 4; 1952 1953 switch (calib->code) { 1954 case IWN5000_PHY_CALIB_DC: 1955 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 1956 idx = 0; 1957 break; 1958 case IWN5000_PHY_CALIB_LO: 1959 idx = 1; 1960 break; 1961 case IWN5000_PHY_CALIB_TX_IQ: 1962 idx = 2; 1963 break; 1964 case IWN5000_PHY_CALIB_TX_IQ_PERD: 1965 if (sc->hw_type != IWN_HW_REV_TYPE_5150) 1966 idx = 3; 1967 break; 1968 case IWN5000_PHY_CALIB_BASE_BAND: 1969 idx = 4; 1970 break; 1971 } 1972 if (idx == -1) /* Ignore other results. */ 1973 return; 1974 1975 /* Save calibration result. */ 1976 if (sc->calibcmd[idx].buf != NULL) 1977 free(sc->calibcmd[idx].buf, M_DEVBUF); 1978 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 1979 if (sc->calibcmd[idx].buf == NULL) { 1980 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 1981 "not enough memory for calibration result %d\n", 1982 calib->code); 1983 return; 1984 } 1985 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 1986 "saving calibration result code=%d len=%d\n", calib->code, len); 1987 sc->calibcmd[idx].len = len; 1988 memcpy(sc->calibcmd[idx].buf, calib, len); 1989 } 1990 1991 /* 1992 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 1993 * The latter is sent by the firmware after each received beacon. 1994 */ 1995 void 1996 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1997 struct iwn_rx_data *data) 1998 { 1999 const struct iwn_hal *hal = sc->sc_hal; 2000 struct ifnet *ifp = sc->sc_ifp; 2001 struct ieee80211com *ic = ifp->if_l2com; 2002 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2003 struct iwn_calib_state *calib = &sc->calib; 2004 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2005 int temp; 2006 2007 /* Beacon stats are meaningful only when associated and not scanning. */ 2008 if (vap->iv_state != IEEE80211_S_RUN || 2009 (ic->ic_flags & IEEE80211_F_SCAN)) 2010 return; 2011 2012 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type); 2013 iwn_calib_reset(sc); /* Reset TX power calibration timeout. */ 2014 2015 /* Test if temperature has changed. */ 2016 if (stats->general.temp != sc->rawtemp) { 2017 /* Convert "raw" temperature to degC. */ 2018 sc->rawtemp = stats->general.temp; 2019 temp = hal->get_temperature(sc); 2020 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 2021 __func__, temp); 2022 2023 /* Update TX power if need be (4965AGN only.) */ 2024 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2025 iwn4965_power_calibration(sc, temp); 2026 } 2027 2028 if (desc->type != IWN_BEACON_STATISTICS) 2029 return; /* Reply to a statistics request. */ 2030 2031 sc->noise = iwn_get_noise(&stats->rx.general); 2032 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 2033 2034 /* Test that RSSI and noise are present in stats report. 
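The firmware sets rx.general.flags to 1 only when per-antenna RSSI and noise figures are included; without them the noise collection and sensitivity tuning below would operate on meaningless values.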
*/ 2035 if (le32toh(stats->rx.general.flags) != 1) { 2036 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 2037 "received statistics without RSSI"); 2038 return; 2039 } 2040 2041 if (calib->state == IWN_CALIB_STATE_ASSOC) 2042 iwn_collect_noise(sc, &stats->rx.general); 2043 else if (calib->state == IWN_CALIB_STATE_RUN) 2044 iwn_tune_sensitivity(sc, &stats->rx); 2045 } 2046 2047 /* 2048 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2049 * and 5000 adapters have different incompatible TX status formats. 2050 */ 2051 void 2052 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2053 struct iwn_rx_data *data) 2054 { 2055 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2056 2057 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2058 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2059 __func__, desc->qid, desc->idx, stat->retrycnt, 2060 stat->killcnt, stat->rate, le16toh(stat->duration), 2061 le32toh(stat->status)); 2062 iwn_tx_done(sc, desc, stat->retrycnt, le32toh(stat->status) & 0xff); 2063 } 2064 2065 void 2066 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2067 struct iwn_rx_data *data) 2068 { 2069 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2070 2071 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2072 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2073 __func__, desc->qid, desc->idx, stat->retrycnt, 2074 stat->killcnt, stat->rate, le16toh(stat->duration), 2075 le32toh(stat->status)); 2076 2077 /* Reset TX scheduler slot. */ 2078 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2079 iwn_tx_done(sc, desc, stat->retrycnt, le16toh(stat->status) & 0xff); 2080 } 2081 2082 /* 2083 * Adapter-independent backend for TX_DONE firmware notifications. 2084 */ 2085 void 2086 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int retrycnt, 2087 uint8_t status) 2088 { 2089 struct ifnet *ifp = sc->sc_ifp; 2090 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2091 struct iwn_tx_data *data = &ring->data[desc->idx]; 2092 struct mbuf *m; 2093 struct ieee80211_node *ni; 2094 2095 KASSERT(data->ni != NULL, ("no node")); 2096 2097 /* Unmap and free mbuf. */ 2098 bus_dmamap_sync(ring->desc_dma.tag, data->map, BUS_DMASYNC_POSTWRITE); 2099 bus_dmamap_unload(ring->desc_dma.tag, data->map); 2100 m = data->m, data->m = NULL; 2101 ni = data->ni, data->ni = NULL; 2102 2103 if (m->m_flags & M_TXCB) { 2104 /* 2105 * Channels marked for "radar" require traffic to be received 2106 * to unlock before we can transmit. Until traffic is seen 2107 * any attempt to transmit is returned immediately with status 2108 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 2109 * happen on first authenticate after scanning. To workaround 2110 * this we ignore a failure of this sort in AUTH state so the 2111 * 802.11 layer will fall back to using a timeout to wait for 2112 * the AUTH reply. This allows the firmware time to see 2113 * traffic so a subsequent retry of AUTH succeeds. It's 2114 * unclear why the firmware does not maintain state for 2115 * channels recently visited as this would allow immediate 2116 * use of the channel after a scan (where we see traffic). 
2117 */
2118 if (status == IWN_TX_FAIL_TX_LOCKED &&
2119 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2120 ieee80211_process_callback(ni, m, 0);
2121 else
2122 ieee80211_process_callback(ni, m,
2123 (status & IWN_TX_FAIL) != 0);
2124 }
2125 m_freem(m);
2126 ieee80211_free_node(ni);
2127
2128 sc->sc_tx_timer = 0;
2129 if (--ring->queued < IWN_TX_RING_LOMARK) {
2130 sc->qfullmsk &= ~(1 << ring->qid);
2131 if (sc->qfullmsk == 0 &&
2132 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2133
2134 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2135 iwn_start_locked(ifp);
2136 }
2137 }
2138 }
2139
2140 /*
2141 * Process a "command done" firmware notification. This is where we wake up
2142 * processes waiting for a synchronous command completion.
2143 */
2144 void
2145 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2146 {
2147 struct iwn_tx_ring *ring = &sc->txq[4];
2148 struct iwn_tx_data *data;
2149
2150 if ((desc->qid & 0xf) != 4)
2151 return; /* Not a command ack. */
2152
2153 data = &ring->data[desc->idx];
2154
2155 /* If the command was mapped in an mbuf, free it. */
2156 if (data->m != NULL) {
2157 bus_dmamap_sync(ring->desc_dma.tag, data->map,
2158 BUS_DMASYNC_POSTWRITE);
2159 bus_dmamap_unload(ring->desc_dma.tag, data->map);
2160 m_freem(data->m);
2161 data->m = NULL;
2162 }
2163 wakeup(&ring->desc[desc->idx]);
2164 }
2165
2166 /*
2167 * Process an INT_FH_RX or INT_SW_RX interrupt.
2168 */
2169 void
2170 iwn_notif_intr(struct iwn_softc *sc)
2171 {
2172 struct ifnet *ifp = sc->sc_ifp;
2173 struct ieee80211com *ic = ifp->if_l2com;
2174 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2175 uint16_t hw;
2176
2177 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2178 BUS_DMASYNC_POSTREAD);
2179
2180 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2181 while (sc->rxq.cur != hw) {
2182 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2183 struct iwn_rx_desc *desc;
2184
2185 bus_dmamap_sync(sc->rxq.stat_dma.tag, data->map,
2186 BUS_DMASYNC_PREWRITE);
2187 desc = mtod(data->m, struct iwn_rx_desc *);
2188
2189 DPRINTF(sc, IWN_DEBUG_RECV,
2190 "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2191 __func__, desc->qid & 0xf, desc->idx, desc->flags,
2192 desc->type, iwn_intr_str(desc->type),
2193 le16toh(desc->len));
2194
2195 if (!(desc->qid & 0x80)) /* Reply to a command. */
2196 iwn_cmd_done(sc, desc);
2197
2198 switch (desc->type) {
2199 case IWN_RX_PHY:
2200 iwn_rx_phy(sc, desc, data);
2201 break;
2202
2203 case IWN_RX_DONE: /* 4965AGN only. */
2204 case IWN_MPDU_RX_DONE:
2205 /* An 802.11 frame has been received. */
2206 iwn_rx_done(sc, desc, data);
2207 break;
2208
2209 case IWN_TX_DONE:
2210 /* An 802.11 frame has been transmitted. */
2211 sc->sc_hal->tx_done(sc, desc, data);
2212 break;
2213
2214 case IWN_RX_STATISTICS:
2215 case IWN_BEACON_STATISTICS:
2216 iwn_rx_statistics(sc, desc, data);
2217 break;
2218
2219 case IWN_BEACON_MISSED:
2220 {
2221 struct iwn_beacon_missed *miss =
2222 (struct iwn_beacon_missed *)(desc + 1);
2223 int misses = le32toh(miss->consecutive);
2224
2225 bus_dmamap_sync(sc->rxq.stat_dma.tag, data->map,
2226 BUS_DMASYNC_PREWRITE);
2227 DPRINTF(sc, IWN_DEBUG_STATE,
2228 "%s: beacons missed %d/%d\n", __func__,
2229 misses, le32toh(miss->total));
2230
2231 /*
2232 * If more than 5 consecutive beacons are missed,
2233 * reinitialize the sensitivity state machine.
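* Once the vap's bmiss threshold is crossed, ieee80211_beacon_miss() is called so net80211 can run its own beacon miss handling.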
2234 */ 2235 if (vap->iv_state == IEEE80211_S_RUN && misses > 5) 2236 (void) iwn_init_sensitivity(sc); 2237 if (misses >= vap->iv_bmissthreshold) 2238 ieee80211_beacon_miss(ic); 2239 break; 2240 } 2241 case IWN_UC_READY: 2242 { 2243 struct iwn_ucode_info *uc = 2244 (struct iwn_ucode_info *)(desc + 1); 2245 2246 /* The microcontroller is ready. */ 2247 bus_dmamap_sync(sc->rxq.stat_dma.tag, data->map, 2248 BUS_DMASYNC_PREWRITE); 2249 DPRINTF(sc, IWN_DEBUG_RESET, 2250 "microcode alive notification version=%d.%d " 2251 "subtype=%x alive=%x\n", uc->major, uc->minor, 2252 uc->subtype, le32toh(uc->valid)); 2253 2254 if (le32toh(uc->valid) != 1) { 2255 device_printf(sc->sc_dev, 2256 "microcontroller initialization failed"); 2257 break; 2258 } 2259 if (uc->subtype == IWN_UCODE_INIT) { 2260 /* Save microcontroller's report. */ 2261 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2262 } 2263 /* Save the address of the error log in SRAM. */ 2264 sc->errptr = le32toh(uc->errptr); 2265 break; 2266 } 2267 case IWN_STATE_CHANGED: 2268 { 2269 uint32_t *status = (uint32_t *)(desc + 1); 2270 2271 /* 2272 * State change allows hardware switch change to be 2273 * noted. However, we handle this in iwn_intr as we 2274 * get both the enable/disble intr. 2275 */ 2276 bus_dmamap_sync(sc->rxq.stat_dma.tag, data->map, 2277 BUS_DMASYNC_PREWRITE); 2278 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n", 2279 le32toh(*status)); 2280 break; 2281 } 2282 case IWN_START_SCAN: 2283 { 2284 struct iwn_start_scan *scan = 2285 (struct iwn_start_scan *)(desc + 1); 2286 2287 bus_dmamap_sync(sc->rxq.stat_dma.tag, data->map, 2288 BUS_DMASYNC_PREWRITE); 2289 DPRINTF(sc, IWN_DEBUG_ANY, 2290 "%s: scanning channel %d status %x\n", 2291 __func__, scan->chan, le32toh(scan->status)); 2292 break; 2293 } 2294 case IWN_STOP_SCAN: 2295 { 2296 struct iwn_stop_scan *scan = 2297 (struct iwn_stop_scan *)(desc + 1); 2298 2299 bus_dmamap_sync(sc->rxq.stat_dma.tag, data->map, 2300 BUS_DMASYNC_PREWRITE); 2301 DPRINTF(sc, IWN_DEBUG_STATE, 2302 "scan finished nchan=%d status=%d chan=%d\n", 2303 scan->nchan, scan->status, scan->chan); 2304 2305 ieee80211_scan_next(vap); 2306 break; 2307 } 2308 case IWN5000_CALIBRATION_RESULT: 2309 iwn5000_rx_calib_results(sc, desc, data); 2310 break; 2311 2312 case IWN5000_CALIBRATION_DONE: 2313 wakeup(sc); 2314 break; 2315 } 2316 2317 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2318 } 2319 2320 /* Tell the firmware what we have processed. */ 2321 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 2322 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2323 } 2324 2325 /* 2326 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2327 * from power-down sleep mode. 2328 */ 2329 void 2330 iwn_wakeup_intr(struct iwn_softc *sc) 2331 { 2332 int qid; 2333 2334 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 2335 __func__); 2336 2337 /* Wakeup RX and TX rings. */ 2338 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2339 for (qid = 0; qid < 6; qid++) { 2340 struct iwn_tx_ring *ring = &sc->txq[qid]; 2341 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2342 } 2343 } 2344 2345 void 2346 iwn_rftoggle_intr(struct iwn_softc *sc) 2347 { 2348 struct ifnet *ifp = sc->sc_ifp; 2349 struct ieee80211com *ic = ifp->if_l2com; 2350 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 2351 2352 IWN_LOCK_ASSERT(sc); 2353 2354 device_printf(sc->sc_dev, "RF switch: radio %s\n", 2355 (tmp & IWN_GP_CNTRL_RFKILL) ? 
"enabled" : "disabled"); 2356 if (tmp & IWN_GP_CNTRL_RFKILL) 2357 ieee80211_runtask(ic, &sc->sc_radioon_task); 2358 else 2359 ieee80211_runtask(ic, &sc->sc_radiooff_task); 2360 } 2361 2362 /* 2363 * Dump the error log of the firmware when a firmware panic occurs. Although 2364 * we can't debug the firmware because it is neither open source nor free, it 2365 * can help us to identify certain classes of problems. 2366 */ 2367 void 2368 iwn_fatal_intr(struct iwn_softc *sc, uint32_t r1, uint32_t r2) 2369 { 2370 #define nitems(_a) (sizeof((_a)) / sizeof((_a)[0])) 2371 const struct iwn_hal *hal = sc->sc_hal; 2372 struct ifnet *ifp = sc->sc_ifp; 2373 struct ieee80211com *ic = ifp->if_l2com; 2374 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2375 struct iwn_fw_dump dump; 2376 int i; 2377 2378 IWN_LOCK_ASSERT(sc); 2379 2380 /* Check that the error log address is valid. */ 2381 if (sc->errptr < IWN_FW_DATA_BASE || 2382 sc->errptr + sizeof (dump) > 2383 IWN_FW_DATA_BASE + hal->fw_data_maxsz) { 2384 printf("%s: bad firmware error log address 0x%08x\n", 2385 __func__, sc->errptr); 2386 return; 2387 } 2388 if (iwn_nic_lock(sc) != 0) { 2389 printf("%s: could not read firmware error log\n", 2390 __func__); 2391 return; 2392 } 2393 /* Read firmware error log from SRAM. */ 2394 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2395 sizeof (dump) / sizeof (uint32_t)); 2396 iwn_nic_unlock(sc); 2397 2398 if (dump.valid == 0) { 2399 printf("%s: firmware error log is empty\n", 2400 __func__); 2401 return; 2402 } 2403 printf("firmware error log:\n"); 2404 printf(" error type = \"%s\" (0x%08X)\n", 2405 (dump.id < nitems(iwn_fw_errmsg)) ? 2406 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2407 dump.id); 2408 printf(" program counter = 0x%08X\n", dump.pc); 2409 printf(" source line = 0x%08X\n", dump.src_line); 2410 printf(" error data = 0x%08X%08X\n", 2411 dump.error_data[0], dump.error_data[1]); 2412 printf(" branch link = 0x%08X%08X\n", 2413 dump.branch_link[0], dump.branch_link[1]); 2414 printf(" interrupt link = 0x%08X%08X\n", 2415 dump.interrupt_link[0], dump.interrupt_link[1]); 2416 printf(" time = %u\n", dump.time[0]); 2417 2418 /* Dump driver status (TX and RX rings) while we're here. */ 2419 printf("driver status:\n"); 2420 for (i = 0; i < hal->ntxqs; i++) { 2421 struct iwn_tx_ring *ring = &sc->txq[i]; 2422 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2423 i, ring->qid, ring->cur, ring->queued); 2424 } 2425 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2426 2427 if (vap != NULL) 2428 ieee80211_cancel_scan(vap); 2429 ieee80211_runtask(ic, &sc->sc_reinit_task); 2430 } 2431 2432 void 2433 iwn_intr(void *arg) 2434 { 2435 struct iwn_softc *sc = arg; 2436 struct ifnet *ifp = sc->sc_ifp; 2437 uint32_t r1, r2; 2438 2439 IWN_LOCK(sc); 2440 2441 /* Disable interrupts. */ 2442 IWN_WRITE(sc, IWN_MASK, 0); 2443 2444 r1 = IWN_READ(sc, IWN_INT); 2445 r2 = IWN_READ(sc, IWN_FH_INT); 2446 2447 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2); 2448 2449 if (r1 == 0 && r2 == 0) { 2450 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) 2451 IWN_WRITE(sc, IWN_MASK, IWN_INT_MASK); 2452 goto done; /* Interrupt not for us. */ 2453 } 2454 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2455 goto done; /* Hardware gone! */ 2456 2457 /* Acknowledge interrupts. 
*/ 2458 IWN_WRITE(sc, IWN_INT, r1); 2459 IWN_WRITE(sc, IWN_FH_INT, r2); 2460 2461 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2); 2462 2463 if (r1 & IWN_INT_RF_TOGGLED) { 2464 iwn_rftoggle_intr(sc); 2465 } 2466 if (r1 & IWN_INT_CT_REACHED) { 2467 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 2468 __func__); 2469 /* XXX Reduce TX power? */ 2470 } 2471 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2472 iwn_fatal_intr(sc, r1, r2); 2473 goto done; 2474 } 2475 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) || 2476 (r2 & IWN_FH_INT_RX)) 2477 iwn_notif_intr(sc); 2478 2479 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) 2480 wakeup(sc); /* FH DMA transfer completed. */ 2481 2482 if (r1 & IWN_INT_ALIVE) 2483 wakeup(sc); /* Firmware is alive. */ 2484 2485 if (r1 & IWN_INT_WAKEUP) 2486 iwn_wakeup_intr(sc); 2487 2488 /* Re-enable interrupts. */ 2489 IWN_WRITE(sc, IWN_MASK, IWN_INT_MASK); 2490 2491 done: 2492 IWN_UNLOCK(sc); 2493 } 2494 2495 /* 2496 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2497 * 5000 adapters use a slightly different format.) 2498 */ 2499 void 2500 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2501 uint16_t len) 2502 { 2503 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2504 2505 *w = htole16(len + 8); 2506 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2507 BUS_DMASYNC_PREWRITE); 2508 if (idx < IWN4965_SCHEDSZ) { 2509 *(w + IWN_TX_RING_COUNT) = *w; 2510 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2511 BUS_DMASYNC_PREWRITE); 2512 } 2513 } 2514 2515 void 2516 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2517 uint16_t len) 2518 { 2519 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2520 2521 *w = htole16(id << 12 | (len + 8)); 2522 2523 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2524 BUS_DMASYNC_PREWRITE); 2525 if (idx < IWN_SCHED_WINSZ) { 2526 *(w + IWN_TX_RING_COUNT) = *w; 2527 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2528 BUS_DMASYNC_PREWRITE); 2529 } 2530 } 2531 2532 void 2533 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2534 { 2535 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2536 2537 *w = (*w & htole16(0xf000)) | htole16(1); 2538 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2539 BUS_DMASYNC_PREWRITE); 2540 if (idx < IWN_SCHED_WINSZ) { 2541 *(w + IWN_TX_RING_COUNT) = *w; 2542 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2543 BUS_DMASYNC_PREWRITE); 2544 } 2545 } 2546 2547 /* Determine if a given rate is CCK or OFDM. 
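Rates are expressed in units of 500kb/s, so 12 (6Mb/s) and above are OFDM, with 22 (11Mb/s CCK) as the lone exception.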
*/ 2548 #define IWN_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) 2549 2550 static const struct iwn_rate * 2551 iwn_plcp_signal(int rate) { 2552 int i; 2553 2554 for (i = 0; i < IWN_RIDX_MAX + 1; i++) { 2555 if (rate == iwn_rates[i].rate) 2556 return &iwn_rates[i]; 2557 } 2558 2559 return &iwn_rates[0]; 2560 } 2561 2562 int 2563 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 2564 struct iwn_tx_ring *ring) 2565 { 2566 const struct iwn_hal *hal = sc->sc_hal; 2567 const struct ieee80211_txparam *tp; 2568 const struct iwn_rate *rinfo; 2569 struct ieee80211vap *vap = ni->ni_vap; 2570 struct ieee80211com *ic = ni->ni_ic; 2571 struct iwn_node *wn = (void *)ni; 2572 struct iwn_tx_desc *desc; 2573 struct iwn_tx_data *data; 2574 struct iwn_tx_cmd *cmd; 2575 struct iwn_cmd_data *tx; 2576 struct ieee80211_frame *wh; 2577 struct ieee80211_key *k = NULL; 2578 struct mbuf *mnew; 2579 bus_addr_t paddr; 2580 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 2581 uint32_t flags; 2582 u_int hdrlen; 2583 int totlen, error, pad, nsegs, i, rate; 2584 uint8_t type, txant; 2585 2586 IWN_LOCK_ASSERT(sc); 2587 2588 wh = mtod(m, struct ieee80211_frame *); 2589 hdrlen = ieee80211_anyhdrsize(wh); 2590 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2591 2592 desc = &ring->desc[ring->cur]; 2593 data = &ring->data[ring->cur]; 2594 2595 /* Choose a TX rate index. */ 2596 /* XXX ni_chan */ 2597 tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; 2598 if (type == IEEE80211_FC0_TYPE_MGT) 2599 rate = tp->mgmtrate; 2600 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 2601 rate = tp->mcastrate; 2602 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2603 rate = tp->ucastrate; 2604 else { 2605 (void) ieee80211_amrr_choose(ni, &IWN_NODE(ni)->amn); 2606 rate = ni->ni_txrate; 2607 } 2608 rinfo = iwn_plcp_signal(rate); 2609 2610 /* Encrypt the frame if need be. */ 2611 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2612 k = ieee80211_crypto_encap(ni, m); 2613 if (k == NULL) { 2614 m_freem(m); 2615 return ENOBUFS; 2616 } 2617 /* Packet header may have moved, reset our local pointer. */ 2618 wh = mtod(m, struct ieee80211_frame *); 2619 } 2620 totlen = m->m_pkthdr.len; 2621 2622 if (ieee80211_radiotap_active_vap(vap)) { 2623 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2624 2625 tap->wt_flags = 0; 2626 tap->wt_rate = rate; 2627 if (k != NULL) 2628 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2629 2630 ieee80211_radiotap_tx(vap, m); 2631 } 2632 2633 /* Prepare TX firmware command. */ 2634 cmd = &ring->cmd[ring->cur]; 2635 cmd->code = IWN_CMD_TX_DATA; 2636 cmd->flags = 0; 2637 cmd->qid = ring->qid; 2638 cmd->idx = ring->cur; 2639 2640 tx = (struct iwn_cmd_data *)cmd->data; 2641 /* NB: No need to clear tx, all fields are reinitialized here. */ 2642 tx->scratch = 0; /* clear "scratch" area */ 2643 2644 flags = 0; 2645 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) 2646 flags |= IWN_TX_NEED_ACK; 2647 if ((wh->i_fc[0] & 2648 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2649 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2650 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 2651 2652 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2653 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 2654 2655 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2656 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2657 /* NB: Group frames are sent using CCK in 802.11b/g. 
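The RTS/CTS and CTS-to-self decisions below therefore only apply to unicast frames.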
*/
2658 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2659 flags |= IWN_TX_NEED_RTS;
2660 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2661 IWN_RATE_IS_OFDM(rate)) {
2662 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2663 flags |= IWN_TX_NEED_CTS;
2664 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2665 flags |= IWN_TX_NEED_RTS;
2666 }
2667 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2668 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2669 /* 5000 autoselects RTS/CTS or CTS-to-self. */
2670 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2671 flags |= IWN_TX_NEED_PROTECTION;
2672 } else
2673 flags |= IWN_TX_FULL_TXOP;
2674 }
2675 }
2676
2677 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2678 type != IEEE80211_FC0_TYPE_DATA)
2679 tx->id = hal->broadcast_id;
2680 else
2681 tx->id = wn->id;
2682
2683 if (type == IEEE80211_FC0_TYPE_MGT) {
2684 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2685
2686 /* Tell HW to set timestamp in probe responses. */
2687 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2688 flags |= IWN_TX_INSERT_TSTAMP;
2689
2690 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2691 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2692 tx->timeout = htole16(3);
2693 else
2694 tx->timeout = htole16(2);
2695 } else
2696 tx->timeout = htole16(0);
2697
2698 if (hdrlen & 3) {
2699 /* First segment's length must be a multiple of 4. */
2700 flags |= IWN_TX_NEED_PADDING;
2701 pad = 4 - (hdrlen & 3);
2702 } else
2703 pad = 0;
2704
2705 tx->len = htole16(totlen);
2706 tx->tid = 0;
2707 tx->rts_ntries = 60; /* XXX? */
2708 tx->data_ntries = 15; /* XXX? */
2709 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
2710 tx->plcp = rinfo->plcp;
2711 tx->rflags = rinfo->flags;
2712 if (tx->id == hal->broadcast_id) {
2713 /* XXX Alternate between antenna A and B? */
2714 txant = IWN_LSB(sc->txantmsk);
2715 tx->rflags |= IWN_RFLAG_ANT(txant);
2716 } else
2717 flags |= IWN_TX_LINKQ;
2718
2719 /* Set physical address of "scratch area". */
2720 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd);
2721 tx->loaddr = htole32(IWN_LOADDR(paddr));
2722 tx->hiaddr = IWN_HIADDR(paddr);
2723
2724 /* Copy 802.11 header in TX command. */
2725 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
2726
2727 /* Trim 802.11 header. */
2728 m_adj(m, hdrlen);
2729 tx->security = 0;
2730 tx->flags = htole32(flags);
2731
2732 error = bus_dmamap_load_mbuf_sg(ring->desc_dma.tag, data->map, m, segs,
2733 &nsegs, BUS_DMA_NOWAIT);
2734 if (error != 0) {
2735 if (error == EFBIG) {
2736 /* Too many fragments, linearize. */
2737 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
2738 if (mnew == NULL) {
2739 IWN_UNLOCK(sc);
2740 device_printf(sc->sc_dev,
2741 "%s: could not defrag mbuf\n", __func__);
2742 m_freem(m);
2743 return ENOBUFS;
2744 }
2745 m = mnew;
2746 error = bus_dmamap_load_mbuf_sg(ring->desc_dma.tag,
2747 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
2748 }
2749 if (error != 0) {
2750 IWN_UNLOCK(sc);
2751 device_printf(sc->sc_dev,
2752 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
2753 __func__, error);
2754 m_freem(m);
2755 return error;
2756 }
2757 }
2758
2759 data->m = m;
2760 data->ni = ni;
2761
2762 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
2763 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
2764
2765 /* Fill TX descriptor. */
2766 desc->nsegs = 1 + nsegs;
2767 /* First DMA segment is used by the TX command.
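It covers the 4-byte command header, the iwn_cmd_data structure and the (padded) copy of the 802.11 header made above; the byte count is packed into the upper 12 bits of the len field, hence the shift by 4 below.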
*/ 2768 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 2769 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | 2770 (4 + sizeof (*tx) + hdrlen + pad) << 4); 2771 /* Other DMA segments are for data payload. */ 2772 for (i = 1; i <= nsegs; i++) { 2773 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 2774 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 2775 segs[i - 1].ds_len << 4); 2776 } 2777 2778 bus_dmamap_sync(ring->desc_dma.tag, data->map, BUS_DMASYNC_PREWRITE); 2779 bus_dmamap_sync(ring->desc_dma.tag, ring->cmd_dma.map, 2780 BUS_DMASYNC_PREWRITE); 2781 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2782 BUS_DMASYNC_PREWRITE); 2783 2784 /* Update TX scheduler. */ 2785 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 2786 2787 /* Kick TX ring. */ 2788 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 2789 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 2790 2791 /* Mark TX ring as full if we reach a certain threshold. */ 2792 if (++ring->queued > IWN_TX_RING_HIMARK) 2793 sc->qfullmsk |= 1 << ring->qid; 2794 2795 return 0; 2796 } 2797 2798 static int 2799 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m0, 2800 struct ieee80211_node *ni, struct iwn_tx_ring *ring, 2801 const struct ieee80211_bpf_params *params) 2802 { 2803 const struct iwn_hal *hal = sc->sc_hal; 2804 const struct iwn_rate *rinfo; 2805 struct ifnet *ifp = sc->sc_ifp; 2806 struct ieee80211vap *vap = ni->ni_vap; 2807 struct ieee80211com *ic = ifp->if_l2com; 2808 struct iwn_tx_cmd *cmd; 2809 struct iwn_cmd_data *tx; 2810 struct ieee80211_frame *wh; 2811 struct iwn_tx_desc *desc; 2812 struct iwn_tx_data *data; 2813 struct mbuf *mnew; 2814 bus_addr_t paddr; 2815 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 2816 uint32_t flags; 2817 u_int hdrlen; 2818 int totlen, error, pad, nsegs, i, rate; 2819 uint8_t type, txant; 2820 2821 IWN_LOCK_ASSERT(sc); 2822 2823 wh = mtod(m0, struct ieee80211_frame *); 2824 hdrlen = ieee80211_anyhdrsize(wh); 2825 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2826 2827 desc = &ring->desc[ring->cur]; 2828 data = &ring->data[ring->cur]; 2829 2830 /* Choose a TX rate index. */ 2831 rate = params->ibp_rate0; 2832 if (!ieee80211_isratevalid(ic->ic_rt, rate)) { 2833 /* XXX fall back to mcast/mgmt rate? */ 2834 m_freem(m0); 2835 return EINVAL; 2836 } 2837 rinfo = iwn_plcp_signal(rate); 2838 2839 totlen = m0->m_pkthdr.len; 2840 2841 cmd = &ring->cmd[ring->cur]; 2842 cmd->code = IWN_CMD_TX_DATA; 2843 cmd->flags = 0; 2844 cmd->qid = ring->qid; 2845 cmd->idx = ring->cur; 2846 2847 tx = (struct iwn_cmd_data *)cmd->data; 2848 /* NB: no need to bzero tx, all fields are reinitialized here */ 2849 tx->scratch = 0; /* clear "scratch" area */ 2850 2851 flags = 0; 2852 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2853 flags |= IWN_TX_NEED_ACK; 2854 if (params->ibp_flags & IEEE80211_BPF_RTS) { 2855 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2856 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 2857 flags &= ~IWN_TX_NEED_RTS; 2858 flags |= IWN_TX_NEED_PROTECTION; 2859 } else 2860 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 2861 } 2862 if (params->ibp_flags & IEEE80211_BPF_CTS) { 2863 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2864 /* 5000 autoselects RTS/CTS or CTS-to-self. 
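Only IWN_TX_NEED_PROTECTION is requested and the firmware picks the actual protection mechanism.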
*/ 2865 flags &= ~IWN_TX_NEED_CTS; 2866 flags |= IWN_TX_NEED_PROTECTION; 2867 } else 2868 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 2869 } 2870 if (type == IEEE80211_FC0_TYPE_MGT) { 2871 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2872 2873 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2874 flags |= IWN_TX_INSERT_TSTAMP; 2875 2876 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2877 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2878 tx->timeout = htole16(3); 2879 else 2880 tx->timeout = htole16(2); 2881 } else 2882 tx->timeout = htole16(0); 2883 2884 if (hdrlen & 3) { 2885 /* First segment's length must be a multiple of 4. */ 2886 flags |= IWN_TX_NEED_PADDING; 2887 pad = 4 - (hdrlen & 3); 2888 } else 2889 pad = 0; 2890 2891 if (ieee80211_radiotap_active_vap(vap)) { 2892 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2893 2894 tap->wt_flags = 0; 2895 tap->wt_rate = rate; 2896 2897 ieee80211_radiotap_tx(vap, m0); 2898 } 2899 2900 tx->len = htole16(totlen); 2901 tx->tid = 0; 2902 tx->id = hal->broadcast_id; 2903 tx->rts_ntries = params->ibp_try1; 2904 tx->data_ntries = params->ibp_try0; 2905 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 2906 tx->plcp = rinfo->plcp; 2907 tx->rflags = rinfo->flags; 2908 if (tx->id == hal->broadcast_id) { 2909 txant = IWN_LSB(sc->txantmsk); 2910 tx->rflags |= IWN_RFLAG_ANT(txant); 2911 } else { 2912 flags |= IWN_TX_LINKQ; /* enable MRR */ 2913 } 2914 /* Set physical address of "scratch area". */ 2915 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd); 2916 tx->loaddr = htole32(IWN_LOADDR(paddr)); 2917 tx->hiaddr = IWN_HIADDR(paddr); 2918 2919 /* Copy 802.11 header in TX command. */ 2920 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 2921 2922 /* Trim 802.11 header. */ 2923 m_adj(m0, hdrlen); 2924 tx->security = 0; 2925 tx->flags = htole32(flags); 2926 2927 error = bus_dmamap_load_mbuf_sg(ring->desc_dma.tag, data->map, m0, segs, 2928 &nsegs, BUS_DMA_NOWAIT); 2929 if (error != 0) { 2930 if (error == EFBIG) { 2931 /* Too many fragments, linearize. */ 2932 mnew = m_collapse(m0, M_DONTWAIT, IWN_MAX_SCATTER); 2933 if (mnew == NULL) { 2934 IWN_UNLOCK(sc); 2935 device_printf(sc->sc_dev, 2936 "%s: could not defrag mbuf\n", __func__); 2937 m_freem(m0); 2938 return ENOBUFS; 2939 } 2940 m0 = mnew; 2941 error = bus_dmamap_load_mbuf_sg(ring->desc_dma.tag, 2942 data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); 2943 } 2944 if (error != 0) { 2945 IWN_UNLOCK(sc); 2946 device_printf(sc->sc_dev, 2947 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", 2948 __func__, error); 2949 m_freem(m0); 2950 return error; 2951 } 2952 } 2953 2954 data->m = m0; 2955 data->ni = ni; 2956 2957 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2958 __func__, ring->qid, ring->cur, m0->m_pkthdr.len, nsegs); 2959 2960 /* Fill TX descriptor. */ 2961 desc->nsegs = 1 + nsegs; 2962 /* First DMA segment is used by the TX command. */ 2963 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 2964 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | 2965 (4 + sizeof (*tx) + hdrlen + pad) << 4); 2966 /* Other DMA segments are for data payload. */ 2967 for (i = 1; i <= nsegs; i++) { 2968 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 2969 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 2970 segs[i - 1].ds_len << 4); 2971 } 2972 2973 /* Update TX scheduler. */ 2974 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 2975 2976 /* Kick TX ring. 
*/ 2977 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 2978 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 2979 2980 /* Mark TX ring as full if we reach a certain threshold. */ 2981 if (++ring->queued > IWN_TX_RING_HIMARK) 2982 sc->qfullmsk |= 1 << ring->qid; 2983 2984 return 0; 2985 } 2986 2987 static int 2988 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2989 const struct ieee80211_bpf_params *params) 2990 { 2991 struct ieee80211com *ic = ni->ni_ic; 2992 struct ifnet *ifp = ic->ic_ifp; 2993 struct iwn_softc *sc = ifp->if_softc; 2994 struct iwn_tx_ring *txq; 2995 int error = 0; 2996 2997 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2998 ieee80211_free_node(ni); 2999 m_freem(m); 3000 return ENETDOWN; 3001 } 3002 3003 IWN_LOCK(sc); 3004 if (params == NULL) 3005 txq = &sc->txq[M_WME_GETAC(m)]; 3006 else 3007 txq = &sc->txq[params->ibp_pri & 3]; 3008 3009 if (params == NULL) { 3010 /* 3011 * Legacy path; interpret frame contents to decide 3012 * precisely how to send the frame. 3013 */ 3014 error = iwn_tx_data(sc, m, ni, txq); 3015 } else { 3016 /* 3017 * Caller supplied explicit parameters to use in 3018 * sending the frame. 3019 */ 3020 error = iwn_tx_data_raw(sc, m, ni, txq, params); 3021 } 3022 if (error != 0) { 3023 /* NB: m is reclaimed on tx failure */ 3024 ieee80211_free_node(ni); 3025 ifp->if_oerrors++; 3026 } 3027 IWN_UNLOCK(sc); 3028 return error; 3029 } 3030 3031 void 3032 iwn_start(struct ifnet *ifp) 3033 { 3034 struct iwn_softc *sc = ifp->if_softc; 3035 3036 IWN_LOCK(sc); 3037 iwn_start_locked(ifp); 3038 IWN_UNLOCK(sc); 3039 } 3040 3041 void 3042 iwn_start_locked(struct ifnet *ifp) 3043 { 3044 struct iwn_softc *sc = ifp->if_softc; 3045 struct ieee80211_node *ni; 3046 struct iwn_tx_ring *txq; 3047 struct mbuf *m; 3048 int pri; 3049 3050 IWN_LOCK_ASSERT(sc); 3051 3052 for (;;) { 3053 if (sc->qfullmsk != 0) { 3054 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3055 break; 3056 } 3057 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 3058 if (m == NULL) 3059 break; 3060 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3061 pri = M_WME_GETAC(m); 3062 txq = &sc->txq[pri]; 3063 if (iwn_tx_data(sc, m, ni, txq) != 0) { 3064 ifp->if_oerrors++; 3065 ieee80211_free_node(ni); 3066 break; 3067 } 3068 sc->sc_tx_timer = 5; 3069 } 3070 } 3071 3072 static void 3073 iwn_watchdog(struct iwn_softc *sc) 3074 { 3075 if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { 3076 struct ifnet *ifp = sc->sc_ifp; 3077 struct ieee80211com *ic = ifp->if_l2com; 3078 3079 if_printf(ifp, "device timeout\n"); 3080 ieee80211_runtask(ic, &sc->sc_reinit_task); 3081 } 3082 } 3083 3084 int 3085 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3086 { 3087 struct iwn_softc *sc = ifp->if_softc; 3088 struct ieee80211com *ic = ifp->if_l2com; 3089 struct ifreq *ifr = (struct ifreq *) data; 3090 int error = 0, startall = 0; 3091 3092 switch (cmd) { 3093 case SIOCSIFFLAGS: 3094 IWN_LOCK(sc); 3095 if (ifp->if_flags & IFF_UP) { 3096 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3097 iwn_init_locked(sc); 3098 startall = 1; 3099 } 3100 } else { 3101 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3102 iwn_stop_locked(sc); 3103 } 3104 IWN_UNLOCK(sc); 3105 if (startall) 3106 ieee80211_start_all(ic); 3107 break; 3108 case SIOCGIFMEDIA: 3109 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3110 break; 3111 case SIOCGIFADDR: 3112 error = ether_ioctl(ifp, cmd, data); 3113 break; 3114 default: 3115 error = EINVAL; 3116 break; 3117 } 3118 return error; 3119 } 3120 3121 /* 3122 * Send a command to the firmware. 
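* Commands go through TX ring 4. A command that fits in the ring slot's preallocated buffer is written there directly; a larger one is copied into an mbuf and DMA-mapped separately. With async set to 0 the caller sleeps until iwn_cmd_done() wakes it up or a one second timeout expires.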
3123 */ 3124 int 3125 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3126 { 3127 const struct iwn_hal *hal = sc->sc_hal; 3128 struct iwn_tx_ring *ring = &sc->txq[4]; 3129 struct iwn_tx_desc *desc; 3130 struct iwn_tx_data *data; 3131 struct iwn_tx_cmd *cmd; 3132 struct mbuf *m; 3133 bus_addr_t paddr; 3134 int totlen, error; 3135 3136 IWN_LOCK_ASSERT(sc); 3137 3138 desc = &ring->desc[ring->cur]; 3139 data = &ring->data[ring->cur]; 3140 totlen = 4 + size; 3141 3142 if (size > sizeof cmd->data) { 3143 /* Command is too large to fit in a descriptor. */ 3144 if (totlen > MCLBYTES) 3145 return EINVAL; 3146 MGETHDR(m, M_DONTWAIT, MT_DATA); 3147 if (m == NULL) 3148 return ENOMEM; 3149 if (totlen > MHLEN) { 3150 MCLGET(m, M_DONTWAIT); 3151 if (!(m->m_flags & M_EXT)) { 3152 m_freem(m); 3153 return ENOMEM; 3154 } 3155 } 3156 cmd = mtod(m, struct iwn_tx_cmd *); 3157 error = bus_dmamap_load(ring->cmd_dma.tag, data->map, cmd, 3158 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3159 if (error != 0) { 3160 m_freem(m); 3161 return error; 3162 } 3163 data->m = m; 3164 } else { 3165 cmd = &ring->cmd[ring->cur]; 3166 paddr = data->cmd_paddr; 3167 } 3168 3169 cmd->code = code; 3170 cmd->flags = 0; 3171 cmd->qid = ring->qid; 3172 cmd->idx = ring->cur; 3173 memcpy(cmd->data, buf, size); 3174 3175 desc->nsegs = 1; 3176 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3177 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3178 3179 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 3180 __func__, iwn_intr_str(cmd->code), cmd->code, 3181 cmd->flags, cmd->qid, cmd->idx); 3182 3183 if (size > sizeof cmd->data) { 3184 bus_dmamap_sync(ring->cmd_dma.tag, data->map, 3185 BUS_DMASYNC_PREWRITE); 3186 } else { 3187 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 3188 BUS_DMASYNC_PREWRITE); 3189 } 3190 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3191 BUS_DMASYNC_PREWRITE); 3192 3193 /* Update TX scheduler. */ 3194 hal->update_sched(sc, ring->qid, ring->cur, 0, 0); 3195 3196 /* Kick command ring. */ 3197 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3198 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3199 3200 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 3201 } 3202 3203 int 3204 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3205 { 3206 struct iwn4965_node_info hnode; 3207 caddr_t src, dst; 3208 3209 /* 3210 * We use the node structure for 5000 Series internally (it is 3211 * a superset of the one for 4965AGN). We thus copy the common 3212 * fields before sending the command. 3213 */ 3214 src = (caddr_t)node; 3215 dst = (caddr_t)&hnode; 3216 memcpy(dst, src, 48); 3217 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3218 memcpy(dst + 48, src + 72, 20); 3219 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3220 } 3221 3222 int 3223 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3224 { 3225 /* Direct mapping. 
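The 5000 firmware takes struct iwn_node_info verbatim, unlike the 4965 which needs the repacking done above.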
*/ 3226 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3227 } 3228 3229 static const uint8_t iwn_ridx_to_plcp[] = { 3230 10, 20, 55, 110, /* CCK */ 3231 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */ 3232 }; 3233 static const uint8_t iwn_siso_mcs_to_plcp[] = { 3234 0, 0, 0, 0, /* CCK */ 3235 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */ 3236 }; 3237 static const uint8_t iwn_mimo_mcs_to_plcp[] = { 3238 0, 0, 0, 0, /* CCK */ 3239 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */ 3240 }; 3241 static const uint8_t iwn_prev_ridx[] = { 3242 /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */ 3243 0, 0, 1, 5, /* CCK */ 3244 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */ 3245 }; 3246 3247 /* 3248 * Configure hardware link parameters for the specified 3249 * node operating on the specified channel. 3250 */ 3251 int 3252 iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, 3253 const struct ieee80211_channel *c, int async) 3254 { 3255 struct iwn_cmd_link_quality linkq; 3256 int ridx, i; 3257 uint8_t txant; 3258 3259 /* Use the first valid TX antenna. */ 3260 txant = IWN_LSB(sc->txantmsk); 3261 3262 memset(&linkq, 0, sizeof linkq); 3263 linkq.id = id; 3264 linkq.antmsk_1stream = txant; 3265 linkq.antmsk_2stream = IWN_ANT_A | IWN_ANT_B; 3266 linkq.ampdu_max = 64; 3267 linkq.ampdu_threshold = 3; 3268 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3269 3270 if (IEEE80211_IS_CHAN_HT(c)) 3271 linkq.mimo = 1; 3272 3273 if (id == IWN_ID_BSS) 3274 ridx = IWN_RIDX_OFDM54; 3275 else if (IEEE80211_IS_CHAN_A(c)) 3276 ridx = IWN_RIDX_OFDM6; 3277 else 3278 ridx = IWN_RIDX_CCK1; 3279 3280 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3281 if (IEEE80211_IS_CHAN_HT40(c)) { 3282 linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx] 3283 | IWN_RIDX_MCS; 3284 linkq.retry[i].rflags = IWN_RFLAG_HT 3285 | IWN_RFLAG_HT40; 3286 /* XXX shortGI */ 3287 } else if (IEEE80211_IS_CHAN_HT(c)) { 3288 linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx] 3289 | IWN_RIDX_MCS; 3290 linkq.retry[i].rflags = IWN_RFLAG_HT; 3291 /* XXX shortGI */ 3292 } else { 3293 linkq.retry[i].plcp = iwn_ridx_to_plcp[ridx]; 3294 if (ridx <= IWN_RIDX_CCK11) 3295 linkq.retry[i].rflags = IWN_RFLAG_CCK; 3296 } 3297 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3298 ridx = iwn_prev_ridx[ridx]; 3299 } 3300 3301 #ifdef IWN_DEBUG 3302 if (sc->sc_debug & IWN_DEBUG_STATE) { 3303 printf("%s: set link quality for node %d, mimo %d ssmask %d\n", 3304 __func__, id, linkq.mimo, linkq.antmsk_1stream); 3305 printf("%s:", __func__); 3306 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) 3307 printf(" %d:%x", linkq.retry[i].plcp, 3308 linkq.retry[i].rflags); 3309 printf("\n"); 3310 } 3311 #endif 3312 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3313 } 3314 3315 /* 3316 * Broadcast node is used to send group-addressed and management frames. 
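* The entry is installed together with a link quality table for the given channel so the firmware knows which rates and antennas to use for it.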
3317 */ 3318 int 3319 iwn_add_broadcast_node(struct iwn_softc *sc, const struct ieee80211_channel *c, 3320 int async) 3321 { 3322 const struct iwn_hal *hal = sc->sc_hal; 3323 struct ifnet *ifp = sc->sc_ifp; 3324 struct iwn_node_info node; 3325 int error; 3326 3327 memset(&node, 0, sizeof node); 3328 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3329 node.id = hal->broadcast_id; 3330 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); 3331 error = hal->add_node(sc, &node, async); 3332 if (error != 0) 3333 return error; 3334 3335 return iwn_set_link_quality(sc, node.id, c, async); 3336 } 3337 3338 int 3339 iwn_wme_update(struct ieee80211com *ic) 3340 { 3341 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3342 #define IWN_TXOP_TO_US(v) (v<<5) 3343 struct iwn_softc *sc = ic->ic_ifp->if_softc; 3344 struct iwn_edca_params cmd; 3345 int i; 3346 3347 memset(&cmd, 0, sizeof cmd); 3348 cmd.flags = htole32(IWN_EDCA_UPDATE); 3349 for (i = 0; i < WME_NUM_AC; i++) { 3350 const struct wmeParams *wmep = 3351 &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; 3352 cmd.ac[i].aifsn = wmep->wmep_aifsn; 3353 cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin)); 3354 cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax)); 3355 cmd.ac[i].txoplimit = 3356 htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit)); 3357 } 3358 IWN_LOCK(sc); 3359 (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/); 3360 IWN_UNLOCK(sc); 3361 return 0; 3362 #undef IWN_TXOP_TO_US 3363 #undef IWN_EXP2 3364 } 3365 3366 void 3367 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3368 { 3369 struct iwn_cmd_led led; 3370 3371 /* Clear microcode LED ownership. */ 3372 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3373 3374 led.which = which; 3375 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3376 led.off = off; 3377 led.on = on; 3378 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3379 } 3380 3381 /* 3382 * Set the critical temperature at which the firmware will notify us. 3383 */ 3384 int 3385 iwn_set_critical_temp(struct iwn_softc *sc) 3386 { 3387 struct iwn_critical_temp crit; 3388 3389 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3390 3391 memset(&crit, 0, sizeof crit); 3392 crit.tempR = htole32(sc->critical_temp); 3393 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %u\n", 3394 crit.tempR); 3395 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3396 } 3397 3398 int 3399 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3400 { 3401 struct iwn_cmd_timing cmd; 3402 uint64_t val, mod; 3403 3404 memset(&cmd, 0, sizeof cmd); 3405 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3406 cmd.bintval = htole16(ni->ni_intval); 3407 cmd.lintval = htole16(10); 3408 3409 /* Compute remaining time until next beacon. */ 3410 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3411 mod = le64toh(cmd.tstamp) % val; 3412 cmd.binitval = htole32((uint32_t)(val - mod)); 3413 3414 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3415 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3416 3417 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3418 } 3419 3420 void 3421 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3422 { 3423 struct ifnet *ifp = sc->sc_ifp; 3424 struct ieee80211com *ic = ifp->if_l2com; 3425 3426 /* Adjust TX power if need be (delta >= 3 degC.) 
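On the 4965 the driver owns TX power compensation, so it is redone whenever the temperature has drifted by 3 degrees Celsius or more since the last calibration.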
*/ 3427 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 3428 __func__, sc->temp, temp); 3429 if (abs(temp - sc->temp) >= 3) { 3430 /* Record temperature of last calibration. */ 3431 sc->temp = temp; 3432 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 3433 } 3434 } 3435 3436 /* 3437 * Set TX power for current channel (each rate has its own power settings). 3438 * This function takes into account the regulatory information from EEPROM, 3439 * the current temperature and the current voltage. 3440 */ 3441 int 3442 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3443 int async) 3444 { 3445 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3446 #define fdivround(a, b, n) \ 3447 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3448 /* Linear interpolation. */ 3449 #define interpolate(x, x1, y1, x2, y2, n) \ 3450 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3451 3452 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3453 struct ifnet *ifp = sc->sc_ifp; 3454 struct ieee80211com *ic = ifp->if_l2com; 3455 struct iwn_ucode_info *uc = &sc->ucode_info; 3456 struct iwn4965_cmd_txpower cmd; 3457 struct iwn4965_eeprom_chan_samples *chans; 3458 int32_t vdiff, tdiff; 3459 int i, c, grp, maxpwr; 3460 const uint8_t *rf_gain, *dsp_gain; 3461 uint8_t chan; 3462 3463 /* Get channel number. */ 3464 chan = ieee80211_chan2ieee(ic, ch); 3465 3466 memset(&cmd, 0, sizeof cmd); 3467 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3468 cmd.chan = chan; 3469 3470 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3471 maxpwr = sc->maxpwr5GHz; 3472 rf_gain = iwn4965_rf_gain_5ghz; 3473 dsp_gain = iwn4965_dsp_gain_5ghz; 3474 } else { 3475 maxpwr = sc->maxpwr2GHz; 3476 rf_gain = iwn4965_rf_gain_2ghz; 3477 dsp_gain = iwn4965_dsp_gain_2ghz; 3478 } 3479 3480 /* Compute voltage compensation. */ 3481 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3482 if (vdiff > 0) 3483 vdiff *= 2; 3484 if (abs(vdiff) > 2) 3485 vdiff = 0; 3486 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3487 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3488 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 3489 3490 /* Get channel's attenuation group. */ 3491 if (chan <= 20) /* 1-20 */ 3492 grp = 4; 3493 else if (chan <= 43) /* 34-43 */ 3494 grp = 0; 3495 else if (chan <= 70) /* 44-70 */ 3496 grp = 1; 3497 else if (chan <= 124) /* 71-124 */ 3498 grp = 2; 3499 else /* 125-200 */ 3500 grp = 3; 3501 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3502 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 3503 3504 /* Get channel's sub-band. */ 3505 for (i = 0; i < IWN_NBANDS; i++) 3506 if (sc->bands[i].lo != 0 && 3507 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3508 break; 3509 if (i == IWN_NBANDS) /* Can't happen in real-life. 
*/ 3510 return EINVAL; 3511 chans = sc->bands[i].chans; 3512 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3513 "%s: chan %d sub-band=%d\n", __func__, chan, i); 3514 3515 for (c = 0; c < 2; c++) { 3516 uint8_t power, gain, temp; 3517 int maxchpwr, pwr, ridx, idx; 3518 3519 power = interpolate(chan, 3520 chans[0].num, chans[0].samples[c][1].power, 3521 chans[1].num, chans[1].samples[c][1].power, 1); 3522 gain = interpolate(chan, 3523 chans[0].num, chans[0].samples[c][1].gain, 3524 chans[1].num, chans[1].samples[c][1].gain, 1); 3525 temp = interpolate(chan, 3526 chans[0].num, chans[0].samples[c][1].temp, 3527 chans[1].num, chans[1].samples[c][1].temp, 1); 3528 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3529 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 3530 __func__, c, power, gain, temp); 3531 3532 /* Compute temperature compensation. */ 3533 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3534 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3535 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 3536 __func__, tdiff, sc->temp, temp); 3537 3538 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3539 maxchpwr = sc->maxpwr[chan] * 2; 3540 if ((ridx / 8) & 1) 3541 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3542 3543 pwr = maxpwr; 3544 3545 /* Adjust TX power based on rate. */ 3546 if ((ridx % 8) == 5) 3547 pwr -= 15; /* OFDM48: -7.5dB */ 3548 else if ((ridx % 8) == 6) 3549 pwr -= 17; /* OFDM54: -8.5dB */ 3550 else if ((ridx % 8) == 7) 3551 pwr -= 20; /* OFDM60: -10dB */ 3552 else 3553 pwr -= 10; /* Others: -5dB */ 3554 3555 /* Do not exceed channel's max TX power. */ 3556 if (pwr > maxchpwr) 3557 pwr = maxchpwr; 3558 3559 idx = gain - (pwr - power) - tdiff - vdiff; 3560 if ((ridx / 8) & 1) /* MIMO */ 3561 idx += (int32_t)le32toh(uc->atten[grp][c]); 3562 3563 if (cmd.band == 0) 3564 idx += 9; /* 5GHz */ 3565 if (ridx == IWN_RIDX_MAX) 3566 idx += 5; /* CCK */ 3567 3568 /* Make sure idx stays in a valid range. */ 3569 if (idx < 0) 3570 idx = 0; 3571 else if (idx > IWN4965_MAX_PWR_INDEX) 3572 idx = IWN4965_MAX_PWR_INDEX; 3573 3574 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3575 "%s: Tx chain %d, rate idx %d: power=%d\n", 3576 __func__, c, ridx, idx); 3577 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3578 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3579 } 3580 } 3581 3582 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3583 "%s: set tx power for chan %d\n", __func__, chan); 3584 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3585 3586 #undef interpolate 3587 #undef fdivround 3588 } 3589 3590 int 3591 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3592 int async) 3593 { 3594 struct iwn5000_cmd_txpower cmd; 3595 3596 /* 3597 * TX power calibration is handled automatically by the firmware 3598 * for 5000 Series. 3599 */ 3600 memset(&cmd, 0, sizeof cmd); 3601 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3602 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3603 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3604 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); 3605 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 3606 } 3607 3608 /* 3609 * Retrieve the maximum RSSI (in dBm) among receivers. 
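* The per-antenna values reported by the hardware are relative to the AGC setting, so the strongest one is converted to dBm by subtracting the AGC gain and the fixed IWN_RSSI_TO_DBM offset.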
3610 */ 3611 int 3612 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 3613 { 3614 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 3615 uint8_t mask, agc; 3616 int rssi; 3617 3618 mask = (le16toh(phy->antenna) >> 4) & 0x7; 3619 agc = (le16toh(phy->agc) >> 7) & 0x7f; 3620 3621 rssi = 0; 3622 #if 0 3623 if (mask & IWN_ANT_A) /* Ant A */ 3624 rssi = max(rssi, phy->rssi[0]); 3625 if (mask & IWN_ATH_B) /* Ant B */ 3626 rssi = max(rssi, phy->rssi[2]); 3627 if (mask & IWN_ANT_C) /* Ant C */ 3628 rssi = max(rssi, phy->rssi[4]); 3629 #else 3630 rssi = max(rssi, phy->rssi[0]); 3631 rssi = max(rssi, phy->rssi[2]); 3632 rssi = max(rssi, phy->rssi[4]); 3633 #endif 3634 3635 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d " 3636 "result %d\n", __func__, agc, mask, 3637 phy->rssi[0], phy->rssi[2], phy->rssi[4], 3638 rssi - agc - IWN_RSSI_TO_DBM); 3639 return rssi - agc - IWN_RSSI_TO_DBM; 3640 } 3641 3642 int 3643 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 3644 { 3645 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 3646 int rssi; 3647 uint8_t agc; 3648 3649 agc = (le32toh(phy->agc) >> 9) & 0x7f; 3650 3651 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 3652 le16toh(phy->rssi[1]) & 0xff); 3653 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 3654 3655 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d " 3656 "result %d\n", __func__, agc, 3657 phy->rssi[0], phy->rssi[2], phy->rssi[4], 3658 rssi - agc - IWN_RSSI_TO_DBM); 3659 return rssi - agc - IWN_RSSI_TO_DBM; 3660 } 3661 3662 /* 3663 * Retrieve the average noise (in dBm) among receivers. 3664 */ 3665 int 3666 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3667 { 3668 int i, total, nbant, noise; 3669 3670 total = nbant = 0; 3671 for (i = 0; i < 3; i++) { 3672 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 3673 continue; 3674 total += noise; 3675 nbant++; 3676 } 3677 /* There should be at least one antenna but check anyway. */ 3678 return (nbant == 0) ? -127 : (total / nbant) - 107; 3679 } 3680 3681 /* 3682 * Compute temperature (in degC) from last received statistics. 3683 */ 3684 int 3685 iwn4965_get_temperature(struct iwn_softc *sc) 3686 { 3687 struct iwn_ucode_info *uc = &sc->ucode_info; 3688 int32_t r1, r2, r3, r4, temp; 3689 3690 r1 = le32toh(uc->temp[0].chan20MHz); 3691 r2 = le32toh(uc->temp[1].chan20MHz); 3692 r3 = le32toh(uc->temp[2].chan20MHz); 3693 r4 = le32toh(sc->rawtemp); 3694 3695 if (r1 == r3) /* Prevents division by 0 (should not happen.) */ 3696 return 0; 3697 3698 /* Sign-extend 23-bit R4 value to 32-bit. */ 3699 r4 = (r4 << 8) >> 8; 3700 /* Compute temperature in Kelvin. */ 3701 temp = (259 * (r4 - r2)) / (r3 - r1); 3702 temp = (temp * 97) / 100 + 8; 3703 3704 return IWN_KTOC(temp); 3705 } 3706 3707 int 3708 iwn5000_get_temperature(struct iwn_softc *sc) 3709 { 3710 /* 3711 * Temperature is not used by the driver for 5000 Series because 3712 * TX power calibration is handled by firmware. We export it to 3713 * users through the sensor framework though. 3714 */ 3715 return le32toh(sc->rawtemp); 3716 } 3717 3718 /* 3719 * Initialize sensitivity calibration state machine. 3720 */ 3721 int 3722 iwn_init_sensitivity(struct iwn_softc *sc) 3723 { 3724 const struct iwn_hal *hal = sc->sc_hal; 3725 struct iwn_calib_state *calib = &sc->calib; 3726 uint32_t flags; 3727 int error; 3728 3729 /* Reset calibration state machine. 
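The initial correlator thresholds are loaded from the per-chip limits; iwn_tune_sensitivity() adjusts them from there based on observed false alarm counts.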
*/ 3730 memset(calib, 0, sizeof (*calib)); 3731 calib->state = IWN_CALIB_STATE_INIT; 3732 calib->cck_state = IWN_CCK_STATE_HIFA; 3733 /* Set initial correlation values. */ 3734 calib->ofdm_x1 = hal->limits->min_ofdm_x1; 3735 calib->ofdm_mrc_x1 = hal->limits->min_ofdm_mrc_x1; 3736 calib->ofdm_x4 = 90; 3737 calib->ofdm_mrc_x4 = hal->limits->min_ofdm_mrc_x4; 3738 calib->cck_x4 = 125; 3739 calib->cck_mrc_x4 = hal->limits->min_cck_mrc_x4; 3740 calib->energy_cck = hal->limits->energy_cck; 3741 3742 /* Write initial sensitivity. */ 3743 error = iwn_send_sensitivity(sc); 3744 if (error != 0) 3745 return error; 3746 3747 /* Write initial gains. */ 3748 error = hal->init_gains(sc); 3749 if (error != 0) 3750 return error; 3751 3752 /* Request statistics at each beacon interval. */ 3753 flags = 0; 3754 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__); 3755 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3756 } 3757 3758 /* 3759 * Collect noise and RSSI statistics for the first 20 beacons received 3760 * after association and use them to determine connected antennas and 3761 * to set differential gains. 3762 */ 3763 void 3764 iwn_collect_noise(struct iwn_softc *sc, 3765 const struct iwn_rx_general_stats *stats) 3766 { 3767 const struct iwn_hal *hal = sc->sc_hal; 3768 struct iwn_calib_state *calib = &sc->calib; 3769 uint32_t val; 3770 int i; 3771 3772 /* Accumulate RSSI and noise for all 3 antennas. */ 3773 for (i = 0; i < 3; i++) { 3774 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 3775 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 3776 } 3777 /* NB: We update differential gains only once after 20 beacons. */ 3778 if (++calib->nbeacons < 20) 3779 return; 3780 3781 /* Determine highest average RSSI. */ 3782 val = MAX(calib->rssi[0], calib->rssi[1]); 3783 val = MAX(calib->rssi[2], val); 3784 3785 /* Determine which antennas are connected. */ 3786 sc->antmsk = 0; 3787 for (i = 0; i < 3; i++) 3788 if (val - calib->rssi[i] <= 15 * 20) 3789 sc->antmsk |= 1 << i; 3790 /* If none of the TX antennas are connected, keep at least one. */ 3791 if ((sc->antmsk & sc->txantmsk) == 0) 3792 sc->antmsk |= IWN_LSB(sc->txantmsk); 3793 3794 (void)hal->set_gains(sc); 3795 calib->state = IWN_CALIB_STATE_RUN; 3796 3797 #ifdef notyet 3798 /* XXX Disable RX chains with no antennas connected. */ 3799 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->antmsk)); 3800 (void)iwn_cmd(sc, IWN_CMD_CONFIGURE, &sc->rxon, hal->rxonsz, 1); 3801 #endif 3802 3803 #if 0 3804 /* XXX: not yet */ 3805 /* Enable power-saving mode if requested by user. */ 3806 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 3807 (void)iwn_set_pslevel(sc, 0, 3, 1); 3808 #endif 3809 } 3810 3811 int 3812 iwn4965_init_gains(struct iwn_softc *sc) 3813 { 3814 struct iwn_phy_calib_gain cmd; 3815 3816 memset(&cmd, 0, sizeof cmd); 3817 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3818 /* Differential gains initially set to 0 for all 3 antennas. 
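(The memset above already cleared cmd.gain[], so only the calibration code needs to be filled in.)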
*/ 3819 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3820 "%s: setting initial differential gains\n", __func__); 3821 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3822 } 3823 3824 int 3825 iwn5000_init_gains(struct iwn_softc *sc) 3826 { 3827 struct iwn_phy_calib cmd; 3828 3829 if (sc->hw_type == IWN_HW_REV_TYPE_6000 || 3830 sc->hw_type == IWN_HW_REV_TYPE_6050) 3831 return 0; 3832 3833 memset(&cmd, 0, sizeof cmd); 3834 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 3835 cmd.ngroups = 1; 3836 cmd.isvalid = 1; 3837 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3838 "%s: setting initial differential gains\n", __func__); 3839 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3840 } 3841 3842 int 3843 iwn4965_set_gains(struct iwn_softc *sc) 3844 { 3845 struct iwn_calib_state *calib = &sc->calib; 3846 struct iwn_phy_calib_gain cmd; 3847 int i, delta, noise; 3848 3849 /* Get minimal noise among connected antennas. */ 3850 noise = INT_MAX; /* NB: There's at least one antenna. */ 3851 for (i = 0; i < 3; i++) 3852 if (sc->antmsk & (1 << i)) 3853 noise = MIN(calib->noise[i], noise); 3854 3855 memset(&cmd, 0, sizeof cmd); 3856 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 3857 /* Set differential gains for connected antennas. */ 3858 for (i = 0; i < 3; i++) { 3859 if (sc->antmsk & (1 << i)) { 3860 /* Compute attenuation (in unit of 1.5dB). */ 3861 delta = (noise - (int32_t)calib->noise[i]) / 30; 3862 /* NB: delta <= 0 */ 3863 /* Limit to [-4.5dB,0]. */ 3864 cmd.gain[i] = MIN(abs(delta), 3); 3865 if (delta < 0) 3866 cmd.gain[i] |= 1 << 2; /* sign bit */ 3867 } 3868 } 3869 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3870 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 3871 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->antmsk); 3872 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3873 } 3874 3875 int 3876 iwn5000_set_gains(struct iwn_softc *sc) 3877 { 3878 struct iwn_calib_state *calib = &sc->calib; 3879 struct iwn_phy_calib_gain cmd; 3880 int i, delta; 3881 3882 if (sc->hw_type == IWN_HW_REV_TYPE_6000 || 3883 sc->hw_type == IWN_HW_REV_TYPE_6050) 3884 return 0; 3885 3886 memset(&cmd, 0, sizeof cmd); 3887 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN; 3888 cmd.ngroups = 1; 3889 cmd.isvalid = 1; 3890 /* Set differential gains for antennas B and C. */ 3891 for (i = 1; i < 3; i++) { 3892 if (sc->antmsk & (1 << i)) { 3893 /* The delta is relative to antenna A. */ 3894 delta = ((int32_t)calib->noise[0] - 3895 (int32_t)calib->noise[i]) / 30; 3896 /* Limit to [-4.5dB,+4.5dB]. */ 3897 cmd.gain[i - 1] = MIN(abs(delta), 3); 3898 if (delta < 0) 3899 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 3900 } 3901 } 3902 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3903 "setting differential gains Ant B/C: %x/%x (%x)\n", 3904 cmd.gain[0], cmd.gain[1], sc->antmsk); 3905 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 3906 } 3907 3908 /* 3909 * Tune RF RX sensitivity based on the number of false alarms detected 3910 * during the last beacon period.
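 * Roughly: the false-alarm and bad-PLCP counters are scaled by 200 TU
 * (200 * 1024 usec) and compared against the time the receiver was
 * enabled (stats->general.load).  More than 50 false alarms per unit of
 * load raises the correlation thresholds (less sensitive); fewer than 5
 * lowers them (more sensitive).  Illustrative numbers only: with
 * load = 200000 and 60 OFDM false alarms in the interval,
 * fa = 60 * 200 * 1024 = 12288000 > 50 * 200000, so each OFDM threshold
 * is bumped up by one step, capped at the per-chip hal->limits maxima.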
3911 */ 3912 void 3913 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 3914 { 3915 #define inc(val, inc, max) \ 3916 if ((val) < (max)) { \ 3917 if ((val) < (max) - (inc)) \ 3918 (val) += (inc); \ 3919 else \ 3920 (val) = (max); \ 3921 needs_update = 1; \ 3922 } 3923 #define dec(val, dec, min) \ 3924 if ((val) > (min)) { \ 3925 if ((val) > (min) + (dec)) \ 3926 (val) -= (dec); \ 3927 else \ 3928 (val) = (min); \ 3929 needs_update = 1; \ 3930 } 3931 3932 const struct iwn_hal *hal = sc->sc_hal; 3933 const struct iwn_sensitivity_limits *limits = hal->limits; 3934 struct iwn_calib_state *calib = &sc->calib; 3935 uint32_t val, rxena, fa; 3936 uint32_t energy[3], energy_min; 3937 uint8_t noise[3], noise_ref; 3938 int i, needs_update = 0; 3939 3940 /* Check that we've been enabled long enough. */ 3941 rxena = le32toh(stats->general.load); 3942 if (rxena == 0) 3943 return; 3944 3945 /* Compute number of false alarms since last call for OFDM. */ 3946 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 3947 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 3948 fa *= 200 * 1024; /* 200TU */ 3949 3950 /* Save counters values for next call. */ 3951 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 3952 calib->fa_ofdm = le32toh(stats->ofdm.fa); 3953 3954 if (fa > 50 * rxena) { 3955 /* High false alarm count, decrease sensitivity. */ 3956 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3957 "%s: OFDM high false alarm count: %u\n", __func__, fa); 3958 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 3959 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 3960 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 3961 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 3962 3963 } else if (fa < 5 * rxena) { 3964 /* Low false alarm count, increase sensitivity. */ 3965 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3966 "%s: OFDM low false alarm count: %u\n", __func__, fa); 3967 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 3968 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 3969 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 3970 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 3971 } 3972 3973 /* Compute maximum noise among 3 receivers. */ 3974 for (i = 0; i < 3; i++) 3975 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 3976 val = MAX(noise[0], noise[1]); 3977 val = MAX(noise[2], val); 3978 /* Insert it into our samples table. */ 3979 calib->noise_samples[calib->cur_noise_sample] = val; 3980 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 3981 3982 /* Compute maximum noise among last 20 samples. */ 3983 noise_ref = calib->noise_samples[0]; 3984 for (i = 1; i < 20; i++) 3985 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 3986 3987 /* Compute maximum energy among 3 receivers. */ 3988 for (i = 0; i < 3; i++) 3989 energy[i] = le32toh(stats->general.energy[i]); 3990 val = MIN(energy[0], energy[1]); 3991 val = MIN(energy[2], val); 3992 /* Insert it into our samples table. */ 3993 calib->energy_samples[calib->cur_energy_sample] = val; 3994 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 3995 3996 /* Compute minimum energy among last 10 samples. */ 3997 energy_min = calib->energy_samples[0]; 3998 for (i = 1; i < 10; i++) 3999 energy_min = MAX(energy_min, calib->energy_samples[i]); 4000 energy_min += 6; 4001 4002 /* Compute number of false alarms since last call for CCK. 
*/ 4003 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4004 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4005 fa *= 200 * 1024; /* 200TU */ 4006 4007 /* Save counters values for next call. */ 4008 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4009 calib->fa_cck = le32toh(stats->cck.fa); 4010 4011 if (fa > 50 * rxena) { 4012 /* High false alarm count, decrease sensitivity. */ 4013 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4014 "%s: CCK high false alarm count: %u\n", __func__, fa); 4015 calib->cck_state = IWN_CCK_STATE_HIFA; 4016 calib->low_fa = 0; 4017 4018 if (calib->cck_x4 > 160) { 4019 calib->noise_ref = noise_ref; 4020 if (calib->energy_cck > 2) 4021 dec(calib->energy_cck, 2, energy_min); 4022 } 4023 if (calib->cck_x4 < 160) { 4024 calib->cck_x4 = 161; 4025 needs_update = 1; 4026 } else 4027 inc(calib->cck_x4, 3, limits->max_cck_x4); 4028 4029 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4030 4031 } else if (fa < 5 * rxena) { 4032 /* Low false alarm count, increase sensitivity. */ 4033 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4034 "%s: CCK low false alarm count: %u\n", __func__, fa); 4035 calib->cck_state = IWN_CCK_STATE_LOFA; 4036 calib->low_fa++; 4037 4038 if (calib->cck_state != IWN_CCK_STATE_INIT && 4039 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4040 calib->low_fa > 100)) { 4041 inc(calib->energy_cck, 2, limits->min_energy_cck); 4042 dec(calib->cck_x4, 3, limits->min_cck_x4); 4043 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4044 } 4045 } else { 4046 /* Not worth to increase or decrease sensitivity. */ 4047 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4048 "%s: CCK normal false alarm count: %u\n", __func__, fa); 4049 calib->low_fa = 0; 4050 calib->noise_ref = noise_ref; 4051 4052 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4053 /* Previous interval had many false alarms. */ 4054 dec(calib->energy_cck, 8, energy_min); 4055 } 4056 calib->cck_state = IWN_CCK_STATE_INIT; 4057 } 4058 4059 if (needs_update) 4060 (void)iwn_send_sensitivity(sc); 4061 #undef dec 4062 #undef inc 4063 } 4064 4065 int 4066 iwn_send_sensitivity(struct iwn_softc *sc) 4067 { 4068 const struct iwn_hal *hal = sc->sc_hal; 4069 struct iwn_calib_state *calib = &sc->calib; 4070 struct iwn_sensitivity_cmd cmd; 4071 4072 memset(&cmd, 0, sizeof cmd); 4073 cmd.which = IWN_SENSITIVITY_WORKTBL; 4074 /* OFDM modulation. */ 4075 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4076 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4077 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4078 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4079 cmd.energy_ofdm = htole16(hal->limits->energy_ofdm); 4080 cmd.energy_ofdm_th = htole16(62); 4081 /* CCK modulation. */ 4082 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4083 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4084 cmd.energy_cck = htole16(calib->energy_cck); 4085 /* Barker modulation: use default values. */ 4086 cmd.corr_barker = htole16(190); 4087 cmd.corr_barker_mrc = htole16(390); 4088 4089 DPRINTF(sc, IWN_DEBUG_RESET, 4090 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 4091 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4092 calib->ofdm_mrc_x4, calib->cck_x4, 4093 calib->cck_mrc_x4, calib->energy_cck); 4094 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1); 4095 } 4096 4097 /* 4098 * Set STA mode power saving level (between 0 and 5). 4099 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 
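 * The (dtim, level) pair selects a row of sleep parameters from the
 * iwn_pmgt tables (dtim <= 2, <= 10, or larger), and the resulting sleep
 * intervals are clamped so they never span more beacons than the
 * DTIM/skip-DTIM policy allows.  Typical calls in this driver:
 * iwn_set_pslevel(sc, 0, 0, 0) during initialization (CAM, no power
 * saving, synchronous) and, in currently-disabled code,
 * iwn_set_pslevel(sc, 0, 3, 1) for a mid power-save level requested
 * asynchronously.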
4100 */ 4101 int 4102 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4103 { 4104 const struct iwn_pmgt *pmgt; 4105 struct iwn_pmgt_cmd cmd; 4106 uint32_t max, skip_dtim; 4107 uint32_t tmp; 4108 int i; 4109 4110 /* Select which PS parameters to use. */ 4111 if (dtim <= 2) 4112 pmgt = &iwn_pmgt[0][level]; 4113 else if (dtim <= 10) 4114 pmgt = &iwn_pmgt[1][level]; 4115 else 4116 pmgt = &iwn_pmgt[2][level]; 4117 4118 memset(&cmd, 0, sizeof cmd); 4119 if (level != 0) /* not CAM */ 4120 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4121 if (level == 5) 4122 cmd.flags |= htole16(IWN_PS_FAST_PD); 4123 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 4124 if (!(tmp & 0x1)) /* L0s Entry disabled. */ 4125 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4126 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4127 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4128 4129 if (dtim == 0) { 4130 dtim = 1; 4131 skip_dtim = 0; 4132 } else 4133 skip_dtim = pmgt->skip_dtim; 4134 if (skip_dtim != 0) { 4135 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4136 max = pmgt->intval[4]; 4137 if (max == (uint32_t)-1) 4138 max = dtim * (skip_dtim + 1); 4139 else if (max > dtim) 4140 max = (max / dtim) * dtim; 4141 } else 4142 max = dtim; 4143 for (i = 0; i < 5; i++) 4144 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 4145 4146 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 4147 level); 4148 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4149 } 4150 4151 int 4152 iwn_config(struct iwn_softc *sc) 4153 { 4154 const struct iwn_hal *hal = sc->sc_hal; 4155 struct ifnet *ifp = sc->sc_ifp; 4156 struct ieee80211com *ic = ifp->if_l2com; 4157 struct iwn_bluetooth bluetooth; 4158 int error; 4159 uint16_t rxchain; 4160 4161 /* Set power saving level to CAM during initialization. */ 4162 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 4163 device_printf(sc->sc_dev, 4164 "%s: could not set power saving level, error %d\n", 4165 __func__, error); 4166 return error; 4167 } 4168 4169 /* Configure bluetooth coexistence. */ 4170 memset(&bluetooth, 0, sizeof bluetooth); 4171 bluetooth.flags = 3; 4172 bluetooth.lead = 0xaa; 4173 bluetooth.kill = 1; 4174 DPRINTF(sc, IWN_DEBUG_RESET, "%s: config bluetooth coexistence\n", 4175 __func__); 4176 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4177 if (error != 0) { 4178 device_printf(sc->sc_dev, 4179 "%s: could not configure bluetooth coexistence, error %d\n", 4180 __func__, error); 4181 return error; 4182 } 4183 4184 /* Configure adapter. */ 4185 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4186 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp)); 4187 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp)); 4188 /* Set default channel. 
*/ 4189 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 4190 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4191 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 4192 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4193 switch (ic->ic_opmode) { 4194 case IEEE80211_M_STA: 4195 sc->rxon.mode = IWN_MODE_STA; 4196 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4197 break; 4198 case IEEE80211_M_IBSS: 4199 case IEEE80211_M_AHDEMO: 4200 sc->rxon.mode = IWN_MODE_IBSS; 4201 break; 4202 case IEEE80211_M_HOSTAP: 4203 sc->rxon.mode = IWN_MODE_HOSTAP; 4204 break; 4205 case IEEE80211_M_MONITOR: 4206 sc->rxon.mode = IWN_MODE_MONITOR; 4207 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4208 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4209 break; 4210 default: 4211 /* Should not get there. */ 4212 break; 4213 } 4214 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4215 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4216 sc->rxon.ht_single_mask = 0xff; 4217 sc->rxon.ht_dual_mask = 0xff; 4218 rxchain = IWN_RXCHAIN_VALID(IWN_ANT_ABC) | IWN_RXCHAIN_IDLE_COUNT(2) | 4219 IWN_RXCHAIN_MIMO_COUNT(2); 4220 sc->rxon.rxchain = htole16(rxchain); 4221 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); 4222 error = iwn_cmd(sc, IWN_CMD_CONFIGURE, &sc->rxon, hal->rxonsz, 0); 4223 if (error != 0) { 4224 device_printf(sc->sc_dev, 4225 "%s: configure command failed\n", __func__); 4226 return error; 4227 } 4228 sc->sc_curchan = ic->ic_curchan; 4229 4230 /* Configuration has changed, set TX power accordingly. */ 4231 error = hal->set_txpower(sc, ic->ic_curchan, 0); 4232 if (error != 0) { 4233 device_printf(sc->sc_dev, 4234 "%s: could not set TX power\n", __func__); 4235 return error; 4236 } 4237 4238 error = iwn_add_broadcast_node(sc, ic->ic_curchan, 0); 4239 if (error != 0) { 4240 device_printf(sc->sc_dev, 4241 "%s: could not add broadcast node\n", __func__); 4242 return error; 4243 } 4244 4245 error = iwn_set_critical_temp(sc); 4246 if (error != 0) { 4247 device_printf(sc->sc_dev, 4248 "%s: could not set critical temperature\n", __func__); 4249 return error; 4250 } 4251 return 0; 4252 } 4253 4254 int 4255 iwn_scan(struct iwn_softc *sc) 4256 { 4257 struct ifnet *ifp = sc->sc_ifp; 4258 struct ieee80211com *ic = ifp->if_l2com; 4259 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/ 4260 struct iwn_scan_hdr *hdr; 4261 struct iwn_cmd_data *tx; 4262 struct iwn_scan_essid *essid; 4263 struct iwn_scan_chan *chan; 4264 struct ieee80211_frame *wh; 4265 struct ieee80211_rateset *rs; 4266 struct ieee80211_channel *c; 4267 enum ieee80211_phymode mode; 4268 int buflen, error, nrates; 4269 uint16_t rxchain; 4270 uint8_t *buf, *frm, txant; 4271 4272 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4273 if (buf == NULL) { 4274 device_printf(sc->sc_dev, 4275 "%s: could not allocate buffer for scan command\n", 4276 __func__); 4277 return ENOMEM; 4278 } 4279 hdr = (struct iwn_scan_hdr *)buf; 4280 4281 /* 4282 * Move to the next channel if no frames are received within 10ms 4283 * after sending the probe request. 4284 */ 4285 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4286 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4287 4288 /* Select antennas for scanning. */ 4289 rxchain = IWN_RXCHAIN_FORCE | IWN_RXCHAIN_VALID(IWN_ANT_ABC) | 4290 IWN_RXCHAIN_MIMO(IWN_ANT_ABC); 4291 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) && 4292 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4293 /* Ant A must be avoided in 5GHz because of an HW bug. 
*/ 4294 rxchain |= IWN_RXCHAIN_SEL(IWN_ANT_B | IWN_ANT_C); 4295 } else /* Use all available RX antennas. */ 4296 rxchain |= IWN_RXCHAIN_SEL(IWN_ANT_ABC); 4297 hdr->rxchain = htole16(rxchain); 4298 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4299 4300 tx = (struct iwn_cmd_data *)(hdr + 1); 4301 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4302 tx->id = sc->sc_hal->broadcast_id; 4303 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4304 4305 if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) { 4306 hdr->crc_threshold = htole16(1); 4307 /* Send probe requests at 6Mbps. */ 4308 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4309 } else { 4310 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4311 /* Send probe requests at 1Mbps. */ 4312 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4313 tx->rflags = IWN_RFLAG_CCK; 4314 } 4315 /* Use the first valid TX antenna. */ 4316 txant = IWN_LSB(sc->txantmsk); 4317 tx->rflags |= IWN_RFLAG_ANT(txant); 4318 4319 essid = (struct iwn_scan_essid *)(tx + 1); 4320 if (ss->ss_ssid[0].len != 0) { 4321 essid[0].id = IEEE80211_ELEMID_SSID; 4322 essid[0].len = ss->ss_ssid[0].len; 4323 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 4324 } 4325 /* 4326 * Build a probe request frame. Most of the following code is a 4327 * copy & paste of what is done in net80211. 4328 */ 4329 wh = (struct ieee80211_frame *)(essid + 20); 4330 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4331 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4332 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4333 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 4334 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 4335 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 4336 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4337 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4338 4339 frm = (uint8_t *)(wh + 1); 4340 4341 /* Add SSID IE. */ 4342 *frm++ = IEEE80211_ELEMID_SSID; 4343 *frm++ = ss->ss_ssid[0].len; 4344 memcpy(frm, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 4345 frm += ss->ss_ssid[0].len; 4346 4347 mode = ieee80211_chan2mode(ic->ic_curchan); 4348 rs = &ic->ic_sup_rates[mode]; 4349 4350 /* Add supported rates IE. */ 4351 *frm++ = IEEE80211_ELEMID_RATES; 4352 nrates = rs->rs_nrates; 4353 if (nrates > IEEE80211_RATE_SIZE) 4354 nrates = IEEE80211_RATE_SIZE; 4355 *frm++ = nrates; 4356 memcpy(frm, rs->rs_rates, nrates); 4357 frm += nrates; 4358 4359 /* Add supported xrates IE. */ 4360 if (rs->rs_nrates > IEEE80211_RATE_SIZE) { 4361 nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; 4362 *frm++ = IEEE80211_ELEMID_XRATES; 4363 *frm++ = (uint8_t)nrates; 4364 memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); 4365 frm += nrates; 4366 } 4367 4368 /* Set length of probe request. 
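 * At this point the scan command buffer looks roughly like this, all
 * structures packed back to back in buf:
 *
 *   iwn_scan_hdr | iwn_cmd_data (tx) | iwn_scan_essid[20] |
 *   probe request frame (802.11 header + SSID, rates and xrates IEs) |
 *   an iwn_scan_chan entry per channel to scan (a single one, the
 *   current channel, in this driver)
 *
 * tx->len below covers only the probe request frame; hdr->len, set once
 * the channel entry has been appended, covers the whole command.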
*/ 4369 tx->len = htole16(frm - (uint8_t *)wh); 4370 4371 c = ic->ic_curchan; 4372 chan = (struct iwn_scan_chan *)frm; 4373 chan->chan = ieee80211_chan2ieee(ic, c); 4374 chan->flags = 0; 4375 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) 4376 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4377 if (ss->ss_nssid > 0) 4378 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4379 chan->dsp_gain = 0x6e; 4380 if (IEEE80211_IS_CHAN_5GHZ(c)) { 4381 chan->rf_gain = 0x3b; 4382 chan->active = htole16(24); 4383 chan->passive = htole16(110); 4384 } else { 4385 chan->rf_gain = 0x28; 4386 chan->active = htole16(36); 4387 chan->passive = htole16(120); 4388 } 4389 hdr->nchan++; 4390 chan++; 4391 4392 DPRINTF(sc, IWN_DEBUG_STATE, "%s: chan %u flags 0x%x rf_gain 0x%x " 4393 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__, 4394 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 4395 chan->active, chan->passive); 4396 4397 buflen = (uint8_t *)chan - buf; 4398 hdr->len = htole16(buflen); 4399 4400 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 4401 hdr->nchan); 4402 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4403 free(buf, M_DEVBUF); 4404 return error; 4405 } 4406 4407 int 4408 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 4409 { 4410 const struct iwn_hal *hal = sc->sc_hal; 4411 struct ifnet *ifp = sc->sc_ifp; 4412 struct ieee80211com *ic = ifp->if_l2com; 4413 struct ieee80211_node *ni = vap->iv_bss; 4414 int error; 4415 4416 sc->calib.state = IWN_CALIB_STATE_INIT; 4417 4418 /* Update adapter's configuration. */ 4419 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4420 sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); 4421 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4422 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4423 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4424 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4425 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4426 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4427 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4428 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4429 sc->rxon.cck_mask = 0; 4430 sc->rxon.ofdm_mask = 0x15; 4431 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4432 sc->rxon.cck_mask = 0x03; 4433 sc->rxon.ofdm_mask = 0; 4434 } else { 4435 /* XXX assume 802.11b/g */ 4436 sc->rxon.cck_mask = 0x0f; 4437 sc->rxon.ofdm_mask = 0x15; 4438 } 4439 DPRINTF(sc, IWN_DEBUG_STATE, 4440 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4441 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4442 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4443 __func__, 4444 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4445 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4446 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4447 le16toh(sc->rxon.rxchain), 4448 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4449 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4450 error = iwn_cmd(sc, IWN_CMD_CONFIGURE, &sc->rxon, hal->rxonsz, 1); 4451 if (error != 0) { 4452 device_printf(sc->sc_dev, 4453 "%s: could not configure, error %d\n", __func__, error); 4454 return error; 4455 } 4456 sc->sc_curchan = ic->ic_curchan; 4457 4458 /* Configuration has changed, set TX power accordingly. */ 4459 if ((error = hal->set_txpower(sc, ni->ni_chan, 1)) != 0) { 4460 device_printf(sc->sc_dev, 4461 "%s: could not set Tx power, error %d\n", __func__, error); 4462 return error; 4463 } 4464 /* 4465 * Reconfiguring RXON clears the firmware's nodes table so we must 4466 * add the broadcast node again. 
4467 */ 4468 error = iwn_add_broadcast_node(sc, ic->ic_curchan, 1); 4469 if (error != 0) { 4470 device_printf(sc->sc_dev, 4471 "%s: 1 could not add broadcast node, error %d\n", 4472 __func__, error); 4473 return error; 4474 } 4475 return 0; 4476 } 4477 4478 /* 4479 * Configure the adapter for associated state. 4480 */ 4481 int 4482 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 4483 { 4484 #define MS(v,x) (((v) & x) >> x##_S) 4485 const struct iwn_hal *hal = sc->sc_hal; 4486 struct ifnet *ifp = sc->sc_ifp; 4487 struct ieee80211com *ic = ifp->if_l2com; 4488 struct ieee80211_node *ni = vap->iv_bss; 4489 struct iwn_node_info node; 4490 int error, maxrxampdu, ampdudensity; 4491 4492 sc->calib.state = IWN_CALIB_STATE_INIT; 4493 4494 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4495 /* link LED blinks while monitoring */ 4496 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4497 return 0; 4498 } 4499 error = iwn_set_timing(sc, ni); 4500 if (error != 0) { 4501 device_printf(sc->sc_dev, 4502 "%s: could not set timing, error %d\n", __func__, error); 4503 return error; 4504 } 4505 4506 /* Update adapter's configuration. */ 4507 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4508 /* Short preamble and slot time are negotiated when associating. */ 4509 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4510 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4511 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4512 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4513 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4514 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 4515 sc->rxon.flags &= ~htole32(IWN_RXON_HT); 4516 if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) 4517 sc->rxon.flags |= htole32(IWN_RXON_HT40U); 4518 else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 4519 sc->rxon.flags |= htole32(IWN_RXON_HT40D); 4520 else 4521 sc->rxon.flags |= htole32(IWN_RXON_HT20); 4522 sc->rxon.rxchain = htole16( 4523 IWN_RXCHAIN_VALID(3) 4524 | IWN_RXCHAIN_MIMO_COUNT(3) 4525 | IWN_RXCHAIN_IDLE_COUNT(1) 4526 | IWN_RXCHAIN_MIMO_FORCE); 4527 4528 maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); 4529 ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); 4530 } else 4531 maxrxampdu = ampdudensity = 0; 4532 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4533 4534 DPRINTF(sc, IWN_DEBUG_STATE, 4535 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4536 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4537 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4538 __func__, 4539 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4540 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4541 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4542 le16toh(sc->rxon.rxchain), 4543 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4544 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4545 error = iwn_cmd(sc, IWN_CMD_CONFIGURE, &sc->rxon, hal->rxonsz, 1); 4546 if (error != 0) { 4547 device_printf(sc->sc_dev, 4548 "%s: could not update configuration, error %d\n", 4549 __func__, error); 4550 return error; 4551 } 4552 sc->sc_curchan = ni->ni_chan; 4553 4554 /* Configuration has changed, set TX power accordingly. */ 4555 error = hal->set_txpower(sc, ni->ni_chan, 1); 4556 if (error != 0) { 4557 device_printf(sc->sc_dev, 4558 "%s: could not set Tx power, error %d\n", __func__, error); 4559 return error; 4560 } 4561 4562 /* Add BSS node. 
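 * The htflags below pack the A-MPDU parameters extracted above with the
 * MS() macro; e.g. MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU)
 * expands, via token pasting, to
 * ((ni->ni_htparam) & IEEE80211_HTCAP_MAXRXAMPDU) >> IEEE80211_HTCAP_MAXRXAMPDU_S
 * so maxrxampdu and ampdudensity are the raw field values from the peer's
 * HT capabilities.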
*/ 4563 memset(&node, 0, sizeof node); 4564 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4565 node.id = IWN_ID_BSS; 4566 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(maxrxampdu) 4567 | IWN_AMDPU_DENSITY(ampdudensity)); 4568 DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n", 4569 __func__, node.id, le32toh(node.htflags)); 4570 error = hal->add_node(sc, &node, 1); 4571 if (error != 0) { 4572 device_printf(sc->sc_dev, "could not add BSS node\n"); 4573 return error; 4574 } 4575 error = iwn_set_link_quality(sc, node.id, ni->ni_chan, 1); 4576 if (error != 0) { 4577 device_printf(sc->sc_dev, 4578 "%s: could not setup MRR for node %d, error %d\n", 4579 __func__, node.id, error); 4580 return error; 4581 } 4582 4583 error = iwn_init_sensitivity(sc); 4584 if (error != 0) { 4585 device_printf(sc->sc_dev, 4586 "%s: could not set sensitivity, error %d\n", 4587 __func__, error); 4588 return error; 4589 } 4590 4591 /* Start periodic calibration timer. */ 4592 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4593 iwn_calib_reset(sc); 4594 4595 /* Link LED always on while associated. */ 4596 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4597 4598 return 0; 4599 #undef MS 4600 } 4601 4602 /* 4603 * Query calibration tables from the initialization firmware. We do this 4604 * only once at first boot. Called from a process context. 4605 */ 4606 int 4607 iwn5000_query_calibration(struct iwn_softc *sc) 4608 { 4609 struct iwn5000_calib_config cmd; 4610 int error; 4611 4612 memset(&cmd, 0, sizeof cmd); 4613 cmd.ucode.once.enable = 0xffffffff; 4614 cmd.ucode.once.start = 0xffffffff; 4615 cmd.ucode.once.send = 0xffffffff; 4616 cmd.ucode.flags = 0xffffffff; 4617 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", 4618 __func__); 4619 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 4620 if (error != 0) 4621 return error; 4622 4623 /* Wait at most two seconds for calibration to complete. */ 4624 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz); 4625 } 4626 4627 /* 4628 * Send calibration results to the runtime firmware. These results were 4629 * obtained on first boot from the initialization firmware. 4630 */ 4631 int 4632 iwn5000_send_calibration(struct iwn_softc *sc) 4633 { 4634 int idx, error; 4635 4636 for (idx = 0; idx < 5; idx++) { 4637 if (sc->calibcmd[idx].buf == NULL) 4638 continue; /* No results available. */ 4639 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4640 "send calibration result idx=%d len=%d\n", 4641 idx, sc->calibcmd[idx].len); 4642 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 4643 sc->calibcmd[idx].len, 0); 4644 if (error != 0) { 4645 device_printf(sc->sc_dev, 4646 "%s: could not send calibration result, error %d\n", 4647 __func__, error); 4648 return error; 4649 } 4650 } 4651 return 0; 4652 } 4653 4654 /* 4655 * This function is called after the runtime firmware notifies us of its 4656 * readiness (called in a process context.) 4657 */ 4658 int 4659 iwn4965_post_alive(struct iwn_softc *sc) 4660 { 4661 int error, qid; 4662 4663 if ((error = iwn_nic_lock(sc)) != 0) 4664 return error; 4665 4666 /* Clear TX scheduler's state in SRAM. */ 4667 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 4668 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 4669 IWN4965_SCHED_CTX_LEN); 4670 4671 /* Set physical address of TX scheduler rings (1KB aligned.) 
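 * The scheduler DRAM address register takes the DMA address in 1 KB
 * units, hence the >> 10 below; e.g. a ring at physical address
 * 0x12340000 is programmed as 0x48d00 (illustrative value).  This is
 * also why the scheduler memory must be allocated 1 KB aligned.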
*/ 4672 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 4673 4674 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 4675 4676 /* Disable chain mode for all our 16 queues. */ 4677 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 4678 4679 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 4680 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 4681 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 4682 4683 /* Set scheduler window size. */ 4684 iwn_mem_write(sc, sc->sched_base + 4685 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 4686 /* Set scheduler frame limit. */ 4687 iwn_mem_write(sc, sc->sched_base + 4688 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 4689 IWN_SCHED_LIMIT << 16); 4690 } 4691 4692 /* Enable interrupts for all our 16 queues. */ 4693 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 4694 /* Identify TX FIFO rings (0-7). */ 4695 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 4696 4697 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 4698 for (qid = 0; qid < 7; qid++) { 4699 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 4700 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 4701 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 4702 } 4703 iwn_nic_unlock(sc); 4704 return 0; 4705 } 4706 4707 /* 4708 * This function is called after the initialization or runtime firmware 4709 * notifies us of its readiness (called in a process context.) 4710 */ 4711 int 4712 iwn5000_post_alive(struct iwn_softc *sc) 4713 { 4714 struct iwn5000_wimax_coex wimax; 4715 int error, qid; 4716 4717 if ((error = iwn_nic_lock(sc)) != 0) 4718 return error; 4719 4720 /* Clear TX scheduler's state in SRAM. */ 4721 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 4722 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 4723 IWN5000_SCHED_CTX_LEN); 4724 4725 /* Set physical address of TX scheduler rings (1KB aligned.) */ 4726 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 4727 4728 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 4729 4730 /* Enable chain mode for all our 20 queues. */ 4731 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffff); 4732 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 4733 4734 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 4735 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 4736 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 4737 4738 iwn_mem_write(sc, sc->sched_base + 4739 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 4740 /* Set scheduler window size and frame limit. */ 4741 iwn_mem_write(sc, sc->sched_base + 4742 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 4743 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 4744 } 4745 4746 /* Enable interrupts for all our 20 queues. */ 4747 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 4748 /* Identify TX FIFO rings (0-7). */ 4749 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 4750 4751 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 4752 for (qid = 0; qid < 7; qid++) { 4753 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 4754 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 4755 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 4756 } 4757 iwn_nic_unlock(sc); 4758 4759 /* Configure WiMAX (IEEE 802.16e) coexistence. 
*/ 4760 memset(&wimax, 0, sizeof wimax); 4761 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 4762 __func__); 4763 error = iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 4764 if (error != 0) { 4765 device_printf(sc->sc_dev, 4766 "%s: could not configure WiMAX coexistence, error %d\n", 4767 __func__, error); 4768 return error; 4769 } 4770 4771 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 4772 struct iwn5000_phy_calib_crystal cmd; 4773 4774 /* Perform crystal calibration. */ 4775 memset(&cmd, 0, sizeof cmd); 4776 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 4777 cmd.ngroups = 1; 4778 cmd.isvalid = 1; 4779 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 4780 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 4781 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4782 "sending crystal calibration %d, %d\n", 4783 cmd.cap_pin[0], cmd.cap_pin[1]); 4784 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 4785 if (error != 0) { 4786 device_printf(sc->sc_dev, 4787 "%s: crystal calibration failed, error %d\n", 4788 __func__, error); 4789 return error; 4790 } 4791 } 4792 if (sc->sc_flags & IWN_FLAG_FIRST_BOOT) { 4793 /* Query calibration from the initialization firmware. */ 4794 if ((error = iwn5000_query_calibration(sc)) != 0) { 4795 device_printf(sc->sc_dev, 4796 "%s: could not query calibration, error %d\n", 4797 __func__, error); 4798 return error; 4799 } 4800 /* 4801 * We have the calibration results now so we can skip 4802 * loading the initialization firmware next time. 4803 */ 4804 sc->sc_flags &= ~IWN_FLAG_FIRST_BOOT; 4805 4806 /* Reboot (call ourselves recursively!) */ 4807 iwn_hw_stop(sc); 4808 error = iwn_hw_init(sc); 4809 } else { 4810 /* Send calibration results to runtime firmware. */ 4811 error = iwn5000_send_calibration(sc); 4812 } 4813 return error; 4814 } 4815 4816 /* 4817 * The firmware boot code is small and is intended to be copied directly into 4818 * the NIC internal memory (no DMA transfer.) 4819 */ 4820 int 4821 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 4822 { 4823 int error, ntries; 4824 4825 size /= sizeof (uint32_t); 4826 4827 error = iwn_nic_lock(sc); 4828 if (error != 0) 4829 return error; 4830 4831 /* Copy microcode image into NIC memory. */ 4832 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 4833 (const uint32_t *)ucode, size); 4834 4835 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 4836 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 4837 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 4838 4839 /* Start boot load now. */ 4840 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 4841 4842 /* Wait for transfer to complete. */ 4843 for (ntries = 0; ntries < 1000; ntries++) { 4844 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 4845 IWN_BSM_WR_CTRL_START)) 4846 break; 4847 DELAY(10); 4848 } 4849 if (ntries == 1000) { 4850 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4851 __func__); 4852 iwn_nic_unlock(sc); 4853 return ETIMEDOUT; 4854 } 4855 4856 /* Enable boot after power up. */ 4857 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 4858 4859 iwn_nic_unlock(sc); 4860 return 0; 4861 } 4862 4863 int 4864 iwn4965_load_firmware(struct iwn_softc *sc) 4865 { 4866 struct iwn_fw_info *fw = &sc->fw; 4867 struct iwn_dma_info *dma = &sc->fw_dma; 4868 int error; 4869 4870 /* Copy initialization sections into pre-allocated DMA-safe memory. 
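 * Layout of the firmware staging area (fw_dma), roughly:
 *
 *   offset 0                       .data section
 *   offset IWN4965_FW_DATA_MAXSZ   .text section
 *
 * The BSM DRAM registers programmed below point at these two regions;
 * the addresses are written shifted right by 4, i.e. in 16-byte units.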
*/ 4871 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 4872 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 4873 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 4874 fw->init.text, fw->init.textsz); 4875 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 4876 4877 /* Tell adapter where to find initialization sections. */ 4878 error = iwn_nic_lock(sc); 4879 if (error != 0) 4880 return error; 4881 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 4882 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 4883 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 4884 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 4885 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 4886 iwn_nic_unlock(sc); 4887 4888 /* Load firmware boot code. */ 4889 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 4890 if (error != 0) { 4891 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 4892 __func__); 4893 return error; 4894 } 4895 /* Now press "execute". */ 4896 IWN_WRITE(sc, IWN_RESET, 0); 4897 4898 /* Wait at most one second for first alive notification. */ 4899 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 4900 if (error) { 4901 device_printf(sc->sc_dev, 4902 "%s: timeout waiting for adapter to initialize, error %d\n", 4903 __func__, error); 4904 return error; 4905 } 4906 4907 /* Retrieve current temperature for initial TX power calibration. */ 4908 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 4909 sc->temp = iwn4965_get_temperature(sc); 4910 4911 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 4912 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 4913 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 4914 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 4915 fw->main.text, fw->main.textsz); 4916 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 4917 4918 /* Tell adapter where to find runtime sections. */ 4919 error = iwn_nic_lock(sc); 4920 if (error != 0) 4921 return error; 4922 4923 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 4924 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 4925 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 4926 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 4927 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 4928 IWN_FW_UPDATED | fw->main.textsz); 4929 iwn_nic_unlock(sc); 4930 4931 return 0; 4932 } 4933 4934 int 4935 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 4936 const uint8_t *section, int size) 4937 { 4938 struct iwn_dma_info *dma = &sc->fw_dma; 4939 int error; 4940 4941 /* Copy firmware section into pre-allocated DMA-safe memory. */ 4942 memcpy(dma->vaddr, section, size); 4943 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 4944 4945 error = iwn_nic_lock(sc); 4946 if (error != 0) 4947 return error; 4948 4949 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 4950 IWN_FH_TX_CONFIG_DMA_PAUSE); 4951 4952 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 4953 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 4954 IWN_LOADDR(dma->paddr)); 4955 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 4956 IWN_HIADDR(dma->paddr) << 28 | size); 4957 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 4958 IWN_FH_TXBUF_STATUS_TBNUM(1) | 4959 IWN_FH_TXBUF_STATUS_TBIDX(1) | 4960 IWN_FH_TXBUF_STATUS_TFBD_VALID); 4961 4962 /* Kick Flow Handler to start DMA transfer. 
*/ 4963 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 4964 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 4965 4966 iwn_nic_unlock(sc); 4967 4968 /* Wait at most five seconds for FH DMA transfer to complete. */ 4969 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 4970 } 4971 4972 int 4973 iwn5000_load_firmware(struct iwn_softc *sc) 4974 { 4975 struct iwn_fw_part *fw; 4976 int error; 4977 4978 /* Load the initialization firmware on first boot only. */ 4979 fw = (sc->sc_flags & IWN_FLAG_FIRST_BOOT) ? 4980 &sc->fw.init : &sc->fw.main; 4981 4982 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 4983 fw->text, fw->textsz); 4984 if (error != 0) { 4985 device_printf(sc->sc_dev, 4986 "%s: could not load firmware %s section, error %d\n", 4987 __func__, ".text", error); 4988 return error; 4989 } 4990 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 4991 fw->data, fw->datasz); 4992 if (error != 0) { 4993 device_printf(sc->sc_dev, 4994 "%s: could not load firmware %s section, error %d\n", 4995 __func__, ".data", error); 4996 return error; 4997 } 4998 4999 /* Now press "execute". */ 5000 IWN_WRITE(sc, IWN_RESET, 0); 5001 return 0; 5002 } 5003 5004 int 5005 iwn_read_firmware(struct iwn_softc *sc) 5006 { 5007 const struct iwn_hal *hal = sc->sc_hal; 5008 const struct iwn_firmware_hdr *hdr; 5009 struct iwn_fw_info *fw = &sc->fw; 5010 size_t size; 5011 5012 IWN_UNLOCK(sc); 5013 5014 /* Read firmware image from filesystem. */ 5015 sc->fw_fp = firmware_get(sc->fwname); 5016 if (sc->fw_fp == NULL) { 5017 device_printf(sc->sc_dev, 5018 "%s: could not load firmware image \"%s\"\n", __func__, 5019 sc->fwname); 5020 IWN_LOCK(sc); 5021 return EINVAL; 5022 } 5023 IWN_LOCK(sc); 5024 5025 size = sc->fw_fp->datasize; 5026 if (size < sizeof (*hdr)) { 5027 device_printf(sc->sc_dev, 5028 "%s: truncated firmware header: %zu bytes\n", 5029 __func__, size); 5030 return EINVAL; 5031 } 5032 5033 /* Extract firmware header information. */ 5034 hdr = (const struct iwn_firmware_hdr *)sc->fw_fp->data; 5035 fw->main.textsz = le32toh(hdr->main_textsz); 5036 fw->main.datasz = le32toh(hdr->main_datasz); 5037 fw->init.textsz = le32toh(hdr->init_textsz); 5038 fw->init.datasz = le32toh(hdr->init_datasz); 5039 fw->boot.textsz = le32toh(hdr->boot_textsz); 5040 fw->boot.datasz = 0; 5041 5042 /* Sanity-check firmware header. */ 5043 if (fw->main.textsz > hal->fw_text_maxsz || 5044 fw->main.datasz > hal->fw_data_maxsz || 5045 fw->init.textsz > hal->fw_text_maxsz || 5046 fw->init.datasz > hal->fw_data_maxsz || 5047 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5048 (fw->boot.textsz & 3) != 0) { 5049 device_printf(sc->sc_dev, "%s: invalid firmware header\n", 5050 __func__); 5051 return EINVAL; 5052 } 5053 5054 /* Check that all firmware sections fit. */ 5055 if (size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz + 5056 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5057 device_printf(sc->sc_dev, 5058 "%s: firmware file too short: %zu bytes\n", 5059 __func__, size); 5060 return EINVAL; 5061 } 5062 5063 /* Get pointers to firmware sections.
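 * The image validated above is laid out sequentially:
 *
 *   iwn_firmware_hdr | main.text | main.data | init.text | init.data | boot.text
 *
 * with the section sizes taken from the header, so the pointers below
 * are simple running offsets into fw_fp->data.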
*/ 5064 fw->main.text = (const uint8_t *)(hdr + 1); 5065 fw->main.data = fw->main.text + fw->main.textsz; 5066 fw->init.text = fw->main.data + fw->main.datasz; 5067 fw->init.data = fw->init.text + fw->init.textsz; 5068 fw->boot.text = fw->init.data + fw->init.datasz; 5069 5070 return 0; 5071 } 5072 5073 void 5074 iwn_unload_firmware(struct iwn_softc *sc) 5075 { 5076 if (sc->fw_fp != NULL) { 5077 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 5078 sc->fw_fp = NULL; 5079 } 5080 } 5081 5082 int 5083 iwn_clock_wait(struct iwn_softc *sc) 5084 { 5085 int ntries; 5086 5087 /* Set "initialization complete" bit. */ 5088 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5089 5090 /* Wait for clock stabilization. */ 5091 for (ntries = 0; ntries < 25000; ntries++) { 5092 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5093 return 0; 5094 DELAY(100); 5095 } 5096 device_printf(sc->sc_dev, 5097 "%s: timeout waiting for clock stabilization\n", __func__); 5098 return ETIMEDOUT; 5099 } 5100 5101 int 5102 iwn4965_apm_init(struct iwn_softc *sc) 5103 { 5104 int error; 5105 5106 /* Disable L0s. */ 5107 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 5108 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 5109 5110 error = iwn_clock_wait(sc); 5111 if (error != 0) 5112 return error; 5113 5114 error = iwn_nic_lock(sc); 5115 if (error != 0) 5116 return error; 5117 5118 /* Enable DMA. */ 5119 iwn_prph_write(sc, IWN_APMG_CLK_CTRL, 5120 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 5121 DELAY(20); 5122 5123 /* Disable L1. */ 5124 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 5125 iwn_nic_unlock(sc); 5126 5127 return 0; 5128 } 5129 5130 int 5131 iwn5000_apm_init(struct iwn_softc *sc) 5132 { 5133 int error; 5134 5135 /* Disable L0s. */ 5136 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 5137 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 5138 5139 /* Set Flow Handler wait threshold to the maximum. */ 5140 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 5141 5142 /* Enable HAP to move adapter from L1a to L0s. */ 5143 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 5144 5145 if (sc->hw_type != IWN_HW_REV_TYPE_6000 && 5146 sc->hw_type != IWN_HW_REV_TYPE_6050) 5147 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 5148 5149 error = iwn_clock_wait(sc); 5150 if (error != 0) 5151 return error; 5152 5153 error = iwn_nic_lock(sc); 5154 if (error != 0) 5155 return error; 5156 5157 /* Enable DMA. */ 5158 iwn_prph_write(sc, IWN_APMG_CLK_CTRL, IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 5159 DELAY(20); 5160 5161 /* Disable L1. */ 5162 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 5163 iwn_nic_unlock(sc); 5164 5165 return 0; 5166 } 5167 5168 void 5169 iwn_apm_stop_master(struct iwn_softc *sc) 5170 { 5171 int ntries; 5172 5173 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 5174 for (ntries = 0; ntries < 100; ntries++) { 5175 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 5176 return; 5177 DELAY(10); 5178 } 5179 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 5180 __func__); 5181 } 5182 5183 void 5184 iwn_apm_stop(struct iwn_softc *sc) 5185 { 5186 iwn_apm_stop_master(sc); 5187 5188 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 5189 DELAY(10); 5190 /* Clear "initialization complete" bit. 
*/ 5191 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5192 } 5193 5194 int 5195 iwn4965_nic_config(struct iwn_softc *sc) 5196 { 5197 uint32_t tmp; 5198 5199 /* Retrieve PCIe Active State Power Management (ASPM). */ 5200 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5201 if (tmp & 0x02) /* L1 Entry enabled. */ 5202 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5203 else 5204 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5205 5206 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 5207 /* 5208 * I don't believe this to be correct but this is what the 5209 * vendor driver is doing. Probably the bits should not be 5210 * shifted in IWN_RFCFG_*. 5211 */ 5212 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5213 IWN_RFCFG_TYPE(sc->rfcfg) | 5214 IWN_RFCFG_STEP(sc->rfcfg) | 5215 IWN_RFCFG_DASH(sc->rfcfg)); 5216 } 5217 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5218 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 5219 return 0; 5220 } 5221 5222 int 5223 iwn5000_nic_config(struct iwn_softc *sc) 5224 { 5225 uint32_t tmp; 5226 int error; 5227 5228 /* Retrieve PCIe Active State Power Management (ASPM). */ 5229 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5230 if (tmp & 0x02) /* L1 Entry enabled. */ 5231 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5232 else 5233 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 5234 5235 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 5236 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5237 IWN_RFCFG_TYPE(sc->rfcfg) | 5238 IWN_RFCFG_STEP(sc->rfcfg) | 5239 IWN_RFCFG_DASH(sc->rfcfg)); 5240 } 5241 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 5242 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 5243 5244 error = iwn_nic_lock(sc); 5245 if (error != 0) 5246 return error; 5247 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 5248 iwn_nic_unlock(sc); 5249 return 0; 5250 } 5251 5252 /* 5253 * Take NIC ownership over Intel Active Management Technology (AMT). 5254 */ 5255 int 5256 iwn_hw_prepare(struct iwn_softc *sc) 5257 { 5258 int ntries; 5259 5260 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 5261 for (ntries = 0; ntries < 15000; ntries++) { 5262 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 5263 IWN_HW_IF_CONFIG_PREPARE_DONE)) 5264 break; 5265 DELAY(10); 5266 } 5267 if (ntries == 15000) 5268 return ETIMEDOUT; 5269 5270 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 5271 for (ntries = 0; ntries < 5; ntries++) { 5272 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 5273 IWN_HW_IF_CONFIG_NIC_READY) 5274 return 0; 5275 DELAY(10); 5276 } 5277 return ETIMEDOUT; 5278 } 5279 5280 int 5281 iwn_hw_init(struct iwn_softc *sc) 5282 { 5283 const struct iwn_hal *hal = sc->sc_hal; 5284 int error, chnl, qid; 5285 5286 /* Clear pending interrupts. */ 5287 IWN_WRITE(sc, IWN_INT, 0xffffffff); 5288 5289 error = hal->apm_init(sc); 5290 if (error != 0) { 5291 device_printf(sc->sc_dev, 5292 "%s: could not power ON adapter, error %d\n", 5293 __func__, error); 5294 return error; 5295 } 5296 5297 /* Select VMAIN power source. */ 5298 error = iwn_nic_lock(sc); 5299 if (error != 0) 5300 return error; 5301 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 5302 iwn_nic_unlock(sc); 5303 5304 /* Perform adapter-specific initialization. */ 5305 error = hal->nic_config(sc); 5306 if (error != 0) 5307 return error; 5308 5309 /* Initialize RX ring. */ 5310 error = iwn_nic_lock(sc); 5311 if (error != 0) 5312 return error; 5313 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 5314 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 5315 /* Set physical address of RX ring (256-byte aligned.) 
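 * IWN_FH_RX_BASE takes the descriptor ring address in 256-byte units
 * (written as paddr >> 8) and IWN_FH_STATUS_WPTR takes the status area
 * address in 16-byte units (paddr >> 4), matching the alignment the
 * rings were allocated with.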
*/ 5316 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 5317 /* Set physical address of RX status (16-byte aligned.) */ 5318 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 5319 /* Enable RX. */ 5320 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 5321 IWN_FH_RX_CONFIG_ENA | 5322 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 5323 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 5324 IWN_FH_RX_CONFIG_SINGLE_FRAME | 5325 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 5326 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 5327 iwn_nic_unlock(sc); 5328 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 5329 5330 error = iwn_nic_lock(sc); 5331 if (error != 0) 5332 return error; 5333 5334 /* Initialize TX scheduler. */ 5335 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 5336 5337 /* Set physical address of "keep warm" page (16-byte aligned.) */ 5338 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 5339 5340 /* Initialize TX rings. */ 5341 for (qid = 0; qid < hal->ntxqs; qid++) { 5342 struct iwn_tx_ring *txq = &sc->txq[qid]; 5343 5344 /* Set physical address of TX ring (256-byte aligned.) */ 5345 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 5346 txq->desc_dma.paddr >> 8); 5347 } 5348 iwn_nic_unlock(sc); 5349 5350 /* Enable DMA channels. */ 5351 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 5352 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 5353 IWN_FH_TX_CONFIG_DMA_ENA | 5354 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 5355 } 5356 5357 /* Clear "radio off" and "commands blocked" bits. */ 5358 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 5359 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 5360 5361 /* Clear pending interrupts. */ 5362 IWN_WRITE(sc, IWN_INT, 0xffffffff); 5363 /* Enable interrupt coalescing. */ 5364 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 5365 /* Enable interrupts. */ 5366 IWN_WRITE(sc, IWN_MASK, IWN_INT_MASK); 5367 5368 /* _Really_ make sure "radio off" bit is cleared! */ 5369 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 5370 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 5371 5372 error = hal->load_firmware(sc); 5373 if (error != 0) { 5374 device_printf(sc->sc_dev, 5375 "%s: could not load firmware, error %d\n", 5376 __func__, error); 5377 return error; 5378 } 5379 /* Wait at most one second for firmware alive notification. */ 5380 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 5381 if (error != 0) { 5382 device_printf(sc->sc_dev, 5383 "%s: timeout waiting for adapter to initialize, error %d\n", 5384 __func__, error); 5385 return error; 5386 } 5387 /* Do post-firmware initialization. */ 5388 return hal->post_alive(sc); 5389 } 5390 5391 void 5392 iwn_hw_stop(struct iwn_softc *sc) 5393 { 5394 const struct iwn_hal *hal = sc->sc_hal; 5395 uint32_t tmp; 5396 int chnl, qid, ntries; 5397 5398 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 5399 5400 /* Disable interrupts. */ 5401 IWN_WRITE(sc, IWN_MASK, 0); 5402 IWN_WRITE(sc, IWN_INT, 0xffffffff); 5403 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 5404 5405 /* Make sure we no longer hold the NIC lock. */ 5406 iwn_nic_unlock(sc); 5407 5408 /* Stop TX scheduler. */ 5409 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 5410 5411 /* Stop all DMA channels. 
*/ 5412 if (iwn_nic_lock(sc) == 0) { 5413 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 5414 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 5415 for (ntries = 0; ntries < 200; ntries++) { 5416 tmp = IWN_READ(sc, IWN_FH_TX_STATUS); 5417 if ((tmp & IWN_FH_TX_STATUS_IDLE(chnl)) == 5418 IWN_FH_TX_STATUS_IDLE(chnl)) 5419 break; 5420 DELAY(10); 5421 } 5422 } 5423 iwn_nic_unlock(sc); 5424 } 5425 5426 /* Stop RX ring. */ 5427 iwn_reset_rx_ring(sc, &sc->rxq); 5428 5429 /* Reset all TX rings. */ 5430 for (qid = 0; qid < hal->ntxqs; qid++) 5431 iwn_reset_tx_ring(sc, &sc->txq[qid]); 5432 5433 if (iwn_nic_lock(sc) == 0) { 5434 iwn_prph_write(sc, IWN_APMG_CLK_DIS, IWN_APMG_CLK_DMA_RQT); 5435 iwn_nic_unlock(sc); 5436 } 5437 DELAY(5); 5438 5439 /* Power OFF adapter. */ 5440 iwn_apm_stop(sc); 5441 } 5442 5443 void 5444 iwn_init_locked(struct iwn_softc *sc) 5445 { 5446 struct ifnet *ifp = sc->sc_ifp; 5447 int error; 5448 5449 IWN_LOCK_ASSERT(sc); 5450 5451 iwn_stop_locked(sc); 5452 5453 error = iwn_hw_prepare(sc); 5454 if (error != 0) { 5455 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 5456 __func__, error); 5457 goto fail; 5458 } 5459 5460 /* Check that the radio is not disabled by hardware switch. */ 5461 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 5462 device_printf(sc->sc_dev, 5463 "%s: radio is disabled by hardware switch\n", 5464 __func__); 5465 error = EPERM; /* :-) */ 5466 goto fail; 5467 } 5468 5469 /* Read firmware images from the filesystem. */ 5470 error = iwn_read_firmware(sc); 5471 if (error != 0) { 5472 device_printf(sc->sc_dev, 5473 "%s: could not read firmware, error %d\n", 5474 __func__, error); 5475 goto fail; 5476 } 5477 5478 /* Initialize hardware and upload firmware. */ 5479 error = iwn_hw_init(sc); 5480 if (error != 0) { 5481 device_printf(sc->sc_dev, 5482 "%s: could not initialize hardware, error %d\n", 5483 __func__, error); 5484 goto fail; 5485 } 5486 5487 /* Configure adapter now that it is ready. */ 5488 error = iwn_config(sc); 5489 if (error != 0) { 5490 device_printf(sc->sc_dev, 5491 "%s: could not configure device, error %d\n", 5492 __func__, error); 5493 goto fail; 5494 } 5495 5496 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5497 ifp->if_drv_flags |= IFF_DRV_RUNNING; 5498 5499 return; 5500 5501 fail: 5502 iwn_stop_locked(sc); 5503 } 5504 5505 void 5506 iwn_init(void *arg) 5507 { 5508 struct iwn_softc *sc = arg; 5509 struct ifnet *ifp = sc->sc_ifp; 5510 struct ieee80211com *ic = ifp->if_l2com; 5511 5512 IWN_LOCK(sc); 5513 iwn_init_locked(sc); 5514 IWN_UNLOCK(sc); 5515 5516 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 5517 ieee80211_start_all(ic); 5518 } 5519 5520 void 5521 iwn_stop_locked(struct iwn_softc *sc) 5522 { 5523 struct ifnet *ifp = sc->sc_ifp; 5524 5525 IWN_LOCK_ASSERT(sc); 5526 5527 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 5528 5529 sc->sc_tx_timer = 0; 5530 callout_stop(&sc->sc_timer_to); 5531 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 5532 5533 /* Power OFF hardware. */ 5534 iwn_hw_stop(sc); 5535 } 5536 5537 5538 void 5539 iwn_stop(struct iwn_softc *sc) 5540 { 5541 IWN_LOCK(sc); 5542 iwn_stop_locked(sc); 5543 IWN_UNLOCK(sc); 5544 } 5545 5546 /* 5547 * Callback from net80211 to start a scan.
5548 */ 5549 static void 5550 iwn_scan_start(struct ieee80211com *ic) 5551 { 5552 struct ifnet *ifp = ic->ic_ifp; 5553 struct iwn_softc *sc = ifp->if_softc; 5554 5555 IWN_LOCK(sc); 5556 /* make the link LED blink while we're scanning */ 5557 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 5558 IWN_UNLOCK(sc); 5559 } 5560 5561 /* 5562 * Callback from net80211 to terminate a scan. 5563 */ 5564 static void 5565 iwn_scan_end(struct ieee80211com *ic) 5566 { 5567 /* ignore */ 5568 } 5569 5570 /* 5571 * Callback from net80211 to force a channel change. 5572 */ 5573 static void 5574 iwn_set_channel(struct ieee80211com *ic) 5575 { 5576 const struct ieee80211_channel *c = ic->ic_curchan; 5577 struct ifnet *ifp = ic->ic_ifp; 5578 struct iwn_softc *sc = ifp->if_softc; 5579 struct ieee80211vap *vap; 5580 int error; 5581 5582 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 5583 5584 IWN_LOCK(sc); 5585 if (c != sc->sc_curchan) { 5586 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 5587 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 5588 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 5589 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 5590 5591 error = iwn_config(sc); 5592 if (error != 0) { 5593 DPRINTF(sc, IWN_DEBUG_STATE, 5594 "%s: set chan failed, cancel scan\n", 5595 __func__); 5596 //XXX Handle failed scan correctly 5597 ieee80211_cancel_scan(vap); 5598 } 5599 } 5600 IWN_UNLOCK(sc); 5601 } 5602 5603 /* 5604 * Callback from net80211 to start scanning of the current channel. 5605 */ 5606 static void 5607 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 5608 { 5609 struct ieee80211vap *vap = ss->ss_vap; 5610 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 5611 int error; 5612 5613 IWN_LOCK(sc); 5614 error = iwn_scan(sc); 5615 IWN_UNLOCK(sc); 5616 if (error != 0) 5617 ieee80211_cancel_scan(vap); 5618 } 5619 5620 /* 5621 * Callback from net80211 to handle the minimum dwell time being met. 5622 * The intent is to terminate the scan but we just let the firmware 5623 * notify us when it's finished as we have no safe way to abort it. 
 */
static void
iwn_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* NB: don't try to abort scan; wait for firmware to finish */
}

/*
 * Reinitialize the hardware and let net80211 know the radio is
 * operational again.
 */
static void
iwn_hw_reset(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_init(sc);
	ieee80211_notify_radio(ic, 1);
}

static void
iwn_radio_on(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;

	iwn_init(sc);
}

static void
iwn_radio_off(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	IWN_LOCK(sc);
	ieee80211_notify_radio(ic, 0);
	iwn_stop_locked(sc);
	IWN_UNLOCK(sc);
}

static void
iwn_sysctlattach(struct iwn_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

#ifdef IWN_DEBUG
	sc->sc_debug = 0;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
#endif
}

static int
iwn_shutdown(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	iwn_stop(sc);
	return 0;
}

static int
iwn_suspend(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	iwn_stop(sc);
	return 0;
}

static int
iwn_resume(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	if (ifp->if_flags & IFF_UP)
		iwn_init(sc);
	return 0;
}

#ifdef IWN_DEBUG
static const char *
iwn_intr_str(uint8_t cmd)
{
	switch (cmd) {
	/* Notifications */
	case IWN_UC_READY:		return "UC_READY";
	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
	case IWN_TX_DONE:		return "TX_DONE";
	case IWN_START_SCAN:		return "START_SCAN";
	case IWN_STOP_SCAN:		return "STOP_SCAN";
	case IWN_RX_STATISTICS:		return "RX_STATS";
	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
	case IWN_RX_PHY:		return "RX_PHY";
	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
	case IWN_RX_DONE:		return "RX_DONE";

	/* Command Notifications */
	case IWN_CMD_CONFIGURE:		return "IWN_CMD_CONFIGURE";
	case IWN_CMD_ASSOCIATE:		return "IWN_CMD_ASSOCIATE";
	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
	case IWN_CMD_PHY_CALIB:
"IWN_CMD_PHY_CALIB"; 5743 } 5744 return "UNKNOWN INTR NOTIF/CMD"; 5745 } 5746 #endif /* IWN_DEBUG */ 5747 5748 static device_method_t iwn_methods[] = { 5749 /* Device interface */ 5750 DEVMETHOD(device_probe, iwn_probe), 5751 DEVMETHOD(device_attach, iwn_attach), 5752 DEVMETHOD(device_detach, iwn_detach), 5753 DEVMETHOD(device_shutdown, iwn_shutdown), 5754 DEVMETHOD(device_suspend, iwn_suspend), 5755 DEVMETHOD(device_resume, iwn_resume), 5756 { 0, 0 } 5757 }; 5758 5759 static driver_t iwn_driver = { 5760 "iwn", 5761 iwn_methods, 5762 sizeof (struct iwn_softc) 5763 }; 5764 static devclass_t iwn_devclass; 5765 5766 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0); 5767 MODULE_DEPEND(iwn, pci, 1, 1, 1); 5768 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 5769 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 5770 MODULE_DEPEND(iwn, wlan_amrr, 1, 1, 1); 5771