1 /*- 2 * Copyright (c) 2007-2009 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Copyright (c) 2008 5 * Benjamin Close <benjsc@FreeBSD.org> 6 * Copyright (c) 2008 Sam Leffler, Errno Consulting 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 23 * adapters. 24 */ 25 26 #include <sys/cdefs.h> 27 __FBSDID("$FreeBSD$"); 28 29 #include <sys/param.h> 30 #include <sys/sockio.h> 31 #include <sys/sysctl.h> 32 #include <sys/mbuf.h> 33 #include <sys/kernel.h> 34 #include <sys/socket.h> 35 #include <sys/systm.h> 36 #include <sys/malloc.h> 37 #include <sys/bus.h> 38 #include <sys/rman.h> 39 #include <sys/endian.h> 40 #include <sys/firmware.h> 41 #include <sys/limits.h> 42 #include <sys/module.h> 43 #include <sys/queue.h> 44 #include <sys/taskqueue.h> 45 46 #include <machine/bus.h> 47 #include <machine/resource.h> 48 #include <machine/clock.h> 49 50 #include <dev/pci/pcireg.h> 51 #include <dev/pci/pcivar.h> 52 53 #include <net/bpf.h> 54 #include <net/if.h> 55 #include <net/if_arp.h> 56 #include <net/ethernet.h> 57 #include <net/if_dl.h> 58 #include <net/if_media.h> 59 #include <net/if_types.h> 60 61 #include <netinet/in.h> 62 #include <netinet/in_systm.h> 63 #include <netinet/in_var.h> 64 #include <netinet/if_ether.h> 65 #include <netinet/ip.h> 66 67 #include <net80211/ieee80211_var.h> 68 #include <net80211/ieee80211_radiotap.h> 69 #include <net80211/ieee80211_regdomain.h> 70 #include <net80211/ieee80211_ratectl.h> 71 72 #include <dev/iwn/if_iwnreg.h> 73 #include <dev/iwn/if_iwnvar.h> 74 75 static int iwn_probe(device_t); 76 static int iwn_attach(device_t); 77 static const struct iwn_hal *iwn_hal_attach(struct iwn_softc *); 78 static void iwn_radiotap_attach(struct iwn_softc *); 79 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 80 const char name[IFNAMSIZ], int unit, int opmode, 81 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], 82 const uint8_t mac[IEEE80211_ADDR_LEN]); 83 static void iwn_vap_delete(struct ieee80211vap *); 84 static int iwn_cleanup(device_t); 85 static int iwn_detach(device_t); 86 static int iwn_nic_lock(struct iwn_softc *); 87 static int iwn_eeprom_lock(struct iwn_softc *); 88 static int iwn_init_otprom(struct iwn_softc *); 89 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 90 static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 91 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 92 void **, bus_size_t, bus_size_t, int); 93 static void iwn_dma_contig_free(struct iwn_dma_info *); 94 static int iwn_alloc_sched(struct iwn_softc *); 95 static void iwn_free_sched(struct iwn_softc *); 96 static int iwn_alloc_kw(struct iwn_softc *); 97 static void iwn_free_kw(struct iwn_softc *); 98 static int iwn_alloc_ict(struct 
iwn_softc *); 99 static void iwn_free_ict(struct iwn_softc *); 100 static int iwn_alloc_fwmem(struct iwn_softc *); 101 static void iwn_free_fwmem(struct iwn_softc *); 102 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 103 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 104 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 105 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 106 int); 107 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 108 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 109 static void iwn5000_ict_reset(struct iwn_softc *); 110 static int iwn_read_eeprom(struct iwn_softc *, 111 uint8_t macaddr[IEEE80211_ADDR_LEN]); 112 static void iwn4965_read_eeprom(struct iwn_softc *); 113 static void iwn4965_print_power_group(struct iwn_softc *, int); 114 static void iwn5000_read_eeprom(struct iwn_softc *); 115 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 116 static void iwn_read_eeprom_band(struct iwn_softc *, int); 117 #if 0 /* HT */ 118 static void iwn_read_eeprom_ht40(struct iwn_softc *, int); 119 #endif 120 static void iwn_read_eeprom_channels(struct iwn_softc *, int, 121 uint32_t); 122 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 123 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 124 const uint8_t mac[IEEE80211_ADDR_LEN]); 125 static void iwn_newassoc(struct ieee80211_node *, int); 126 static int iwn_media_change(struct ifnet *); 127 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 128 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 129 struct iwn_rx_data *); 130 static void iwn_timer_timeout(void *); 131 static void iwn_calib_reset(struct iwn_softc *); 132 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 133 struct iwn_rx_data *); 134 #if 0 /* HT */ 135 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 136 struct iwn_rx_data *); 137 #endif 138 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 139 struct iwn_rx_data *); 140 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 141 struct iwn_rx_data *); 142 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 143 struct iwn_rx_data *); 144 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 145 uint8_t); 146 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 147 static void iwn_notif_intr(struct iwn_softc *); 148 static void iwn_wakeup_intr(struct iwn_softc *); 149 static void iwn_rftoggle_intr(struct iwn_softc *); 150 static void iwn_fatal_intr(struct iwn_softc *); 151 static void iwn_intr(void *); 152 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 153 uint16_t); 154 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 155 uint16_t); 156 #ifdef notyet 157 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 158 #endif 159 static uint8_t iwn_plcp_signal(int); 160 static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 161 struct ieee80211_node *, struct iwn_tx_ring *); 162 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 163 const struct ieee80211_bpf_params *); 164 static void iwn_start(struct ifnet *); 165 static void iwn_start_locked(struct ifnet *); 166 static void iwn_watchdog(struct iwn_softc *sc); 167 static int iwn_ioctl(struct ifnet *, u_long, caddr_t); 168 static int iwn_cmd(struct 
iwn_softc *, int, const void *, int, int); 169 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 170 int); 171 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 172 int); 173 static int iwn_set_link_quality(struct iwn_softc *, uint8_t, int); 174 static int iwn_add_broadcast_node(struct iwn_softc *, int); 175 static int iwn_wme_update(struct ieee80211com *); 176 static void iwn_update_mcast(struct ifnet *); 177 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 178 static int iwn_set_critical_temp(struct iwn_softc *); 179 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 180 static void iwn4965_power_calibration(struct iwn_softc *, int); 181 static int iwn4965_set_txpower(struct iwn_softc *, 182 struct ieee80211_channel *, int); 183 static int iwn5000_set_txpower(struct iwn_softc *, 184 struct ieee80211_channel *, int); 185 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 186 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 187 static int iwn_get_noise(const struct iwn_rx_general_stats *); 188 static int iwn4965_get_temperature(struct iwn_softc *); 189 static int iwn5000_get_temperature(struct iwn_softc *); 190 static int iwn_init_sensitivity(struct iwn_softc *); 191 static void iwn_collect_noise(struct iwn_softc *, 192 const struct iwn_rx_general_stats *); 193 static int iwn4965_init_gains(struct iwn_softc *); 194 static int iwn5000_init_gains(struct iwn_softc *); 195 static int iwn4965_set_gains(struct iwn_softc *); 196 static int iwn5000_set_gains(struct iwn_softc *); 197 static void iwn_tune_sensitivity(struct iwn_softc *, 198 const struct iwn_rx_stats *); 199 static int iwn_send_sensitivity(struct iwn_softc *); 200 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 201 static int iwn_config(struct iwn_softc *); 202 static int iwn_scan(struct iwn_softc *); 203 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 204 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 205 #if 0 /* HT */ 206 static int iwn_ampdu_rx_start(struct ieee80211com *, 207 struct ieee80211_node *, uint8_t); 208 static void iwn_ampdu_rx_stop(struct ieee80211com *, 209 struct ieee80211_node *, uint8_t); 210 static int iwn_ampdu_tx_start(struct ieee80211com *, 211 struct ieee80211_node *, uint8_t); 212 static void iwn_ampdu_tx_stop(struct ieee80211com *, 213 struct ieee80211_node *, uint8_t); 214 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 215 struct ieee80211_node *, uint8_t, uint16_t); 216 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t); 217 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 218 struct ieee80211_node *, uint8_t, uint16_t); 219 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t); 220 #endif 221 static int iwn5000_send_calib_results(struct iwn_softc *); 222 static int iwn5000_save_calib_result(struct iwn_softc *, 223 struct iwn_phy_calib *, int, int); 224 static void iwn5000_free_calib_results(struct iwn_softc *); 225 static int iwn5000_chrystal_calib(struct iwn_softc *); 226 static int iwn5000_send_calib_query(struct iwn_softc *); 227 static int iwn5000_rx_calib_result(struct iwn_softc *, 228 struct iwn_rx_desc *, struct iwn_rx_data *); 229 static int iwn5000_send_wimax_coex(struct iwn_softc *); 230 static int iwn4965_post_alive(struct iwn_softc *); 231 static int iwn5000_post_alive(struct iwn_softc *); 232 static int iwn4965_load_bootcode(struct iwn_softc *, const 
uint8_t *, 233 int); 234 static int iwn4965_load_firmware(struct iwn_softc *); 235 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 236 const uint8_t *, int); 237 static int iwn5000_load_firmware(struct iwn_softc *); 238 static int iwn_read_firmware_leg(struct iwn_softc *, 239 struct iwn_fw_info *); 240 static int iwn_read_firmware_tlv(struct iwn_softc *, 241 struct iwn_fw_info *, uint16_t); 242 static int iwn_read_firmware(struct iwn_softc *); 243 static int iwn_clock_wait(struct iwn_softc *); 244 static int iwn_apm_init(struct iwn_softc *); 245 static void iwn_apm_stop_master(struct iwn_softc *); 246 static void iwn_apm_stop(struct iwn_softc *); 247 static int iwn4965_nic_config(struct iwn_softc *); 248 static int iwn5000_nic_config(struct iwn_softc *); 249 static int iwn_hw_prepare(struct iwn_softc *); 250 static int iwn_hw_init(struct iwn_softc *); 251 static void iwn_hw_stop(struct iwn_softc *); 252 static void iwn_init_locked(struct iwn_softc *); 253 static void iwn_init(void *); 254 static void iwn_stop_locked(struct iwn_softc *); 255 static void iwn_stop(struct iwn_softc *); 256 static void iwn_scan_start(struct ieee80211com *); 257 static void iwn_scan_end(struct ieee80211com *); 258 static void iwn_set_channel(struct ieee80211com *); 259 static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 260 static void iwn_scan_mindwell(struct ieee80211_scan_state *); 261 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 262 struct ieee80211_channel *); 263 static int iwn_setregdomain(struct ieee80211com *, 264 struct ieee80211_regdomain *, int, 265 struct ieee80211_channel []); 266 static void iwn_hw_reset(void *, int); 267 static void iwn_radio_on(void *, int); 268 static void iwn_radio_off(void *, int); 269 static void iwn_sysctlattach(struct iwn_softc *); 270 static int iwn_shutdown(device_t); 271 static int iwn_suspend(device_t); 272 static int iwn_resume(device_t); 273 274 #define IWN_DEBUG 275 #ifdef IWN_DEBUG 276 enum { 277 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 278 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ 279 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ 280 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ 281 IWN_DEBUG_RESET = 0x00000010, /* reset processing */ 282 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */ 283 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ 284 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ 285 IWN_DEBUG_INTR = 0x00000100, /* ISR */ 286 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ 287 IWN_DEBUG_NODE = 0x00000400, /* node management */ 288 IWN_DEBUG_LED = 0x00000800, /* led management */ 289 IWN_DEBUG_CMD = 0x00001000, /* cmd submission */ 290 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ 291 IWN_DEBUG_ANY = 0xffffffff 292 }; 293 294 #define DPRINTF(sc, m, fmt, ...) do { \ 295 if (sc->sc_debug & (m)) \ 296 printf(fmt, __VA_ARGS__); \ 297 } while (0) 298 299 static const char *iwn_intr_str(uint8_t); 300 #else 301 #define DPRINTF(sc, m, fmt, ...) 
do { (void) sc; } while (0) 302 #endif 303 304 struct iwn_ident { 305 uint16_t vendor; 306 uint16_t device; 307 const char *name; 308 }; 309 310 static const struct iwn_ident iwn_ident_table [] = { 311 { 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" }, 312 { 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" }, 313 { 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" }, 314 { 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" }, 315 { 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" }, 316 { 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" }, 317 { 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" }, 318 { 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" }, 319 { 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" }, 320 { 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" }, 321 { 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" }, 322 { 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" }, 323 { 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" }, 324 { 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" }, 325 { 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" }, 326 { 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" }, 327 { 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" }, 328 { 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" }, 329 { 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" }, 330 { 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" }, 331 { 0x8086, 0x0087, "Intel(R) PRO/Wireless 6250" }, 332 { 0x8086, 0x0089, "Intel(R) PRO/Wireless 6250" }, 333 { 0x8086, 0x0082, "Intel(R) PRO/Wireless 6205a" }, 334 { 0x8086, 0x0085, "Intel(R) PRO/Wireless 6205a" }, 335 #ifdef notyet 336 { 0x8086, 0x008a, "Intel(R) PRO/Wireless 6205b" }, 337 { 0x8086, 0x008b, "Intel(R) PRO/Wireless 6205b" }, 338 { 0x8086, 0x008f, "Intel(R) PRO/Wireless 6205b" }, 339 { 0x8086, 0x0090, "Intel(R) PRO/Wireless 6205b" }, 340 { 0x8086, 0x0091, "Intel(R) PRO/Wireless 6205b" }, 341 #endif 342 { 0, 0, NULL } 343 }; 344 345 static const struct iwn_hal iwn4965_hal = { 346 iwn4965_load_firmware, 347 iwn4965_read_eeprom, 348 iwn4965_post_alive, 349 iwn4965_nic_config, 350 iwn4965_update_sched, 351 iwn4965_get_temperature, 352 iwn4965_get_rssi, 353 iwn4965_set_txpower, 354 iwn4965_init_gains, 355 iwn4965_set_gains, 356 iwn4965_add_node, 357 iwn4965_tx_done, 358 #if 0 /* HT */ 359 iwn4965_ampdu_tx_start, 360 iwn4965_ampdu_tx_stop, 361 #endif 362 IWN4965_NTXQUEUES, 363 IWN4965_NDMACHNLS, 364 IWN4965_ID_BROADCAST, 365 IWN4965_RXONSZ, 366 IWN4965_SCHEDSZ, 367 IWN4965_FW_TEXT_MAXSZ, 368 IWN4965_FW_DATA_MAXSZ, 369 IWN4965_FWSZ, 370 IWN4965_SCHED_TXFACT 371 }; 372 373 static const struct iwn_hal iwn5000_hal = { 374 iwn5000_load_firmware, 375 iwn5000_read_eeprom, 376 iwn5000_post_alive, 377 iwn5000_nic_config, 378 iwn5000_update_sched, 379 iwn5000_get_temperature, 380 iwn5000_get_rssi, 381 iwn5000_set_txpower, 382 iwn5000_init_gains, 383 iwn5000_set_gains, 384 iwn5000_add_node, 385 iwn5000_tx_done, 386 #if 0 /* HT */ 387 iwn5000_ampdu_tx_start, 388 iwn5000_ampdu_tx_stop, 389 #endif 390 IWN5000_NTXQUEUES, 391 IWN5000_NDMACHNLS, 392 IWN5000_ID_BROADCAST, 393 IWN5000_RXONSZ, 394 IWN5000_SCHEDSZ, 395 IWN5000_FW_TEXT_MAXSZ, 396 IWN5000_FW_DATA_MAXSZ, 397 IWN5000_FWSZ, 398 IWN5000_SCHED_TXFACT 399 }; 400 401 static int 402 iwn_probe(device_t dev) 403 { 404 const struct iwn_ident *ident; 405 406 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 407 if (pci_get_vendor(dev) == ident->vendor && 408 pci_get_device(dev) == ident->device) { 409 device_set_desc(dev, ident->name); 410 return 0; 411 } 412 } 413 return ENXIO; 414 } 415 416 static int 417 iwn_attach(device_t dev) 418 { 419 struct iwn_softc *sc = 
(struct iwn_softc *)device_get_softc(dev); 420 struct ieee80211com *ic; 421 struct ifnet *ifp; 422 const struct iwn_hal *hal; 423 uint32_t tmp; 424 int i, error, result; 425 uint8_t macaddr[IEEE80211_ADDR_LEN]; 426 427 sc->sc_dev = dev; 428 429 /* 430 * Get the offset of the PCI Express Capability Structure in PCI 431 * Configuration Space. 432 */ 433 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 434 if (error != 0) { 435 device_printf(dev, "PCIe capability structure not found!\n"); 436 return error; 437 } 438 439 /* Clear device-specific "PCI retry timeout" register (41h). */ 440 pci_write_config(dev, 0x41, 0, 1); 441 442 /* Hardware bug workaround. */ 443 tmp = pci_read_config(dev, PCIR_COMMAND, 1); 444 if (tmp & PCIM_CMD_INTxDIS) { 445 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 446 __func__); 447 tmp &= ~PCIM_CMD_INTxDIS; 448 pci_write_config(dev, PCIR_COMMAND, tmp, 1); 449 } 450 451 /* Enable bus-mastering. */ 452 pci_enable_busmaster(dev); 453 454 sc->mem_rid = PCIR_BAR(0); 455 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 456 RF_ACTIVE); 457 if (sc->mem == NULL ) { 458 device_printf(dev, "could not allocate memory resources\n"); 459 error = ENOMEM; 460 return error; 461 } 462 463 sc->sc_st = rman_get_bustag(sc->mem); 464 sc->sc_sh = rman_get_bushandle(sc->mem); 465 sc->irq_rid = 0; 466 if ((result = pci_msi_count(dev)) == 1 && 467 pci_alloc_msi(dev, &result) == 0) 468 sc->irq_rid = 1; 469 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, 470 RF_ACTIVE | RF_SHAREABLE); 471 if (sc->irq == NULL) { 472 device_printf(dev, "could not allocate interrupt resource\n"); 473 error = ENOMEM; 474 goto fail; 475 } 476 477 IWN_LOCK_INIT(sc); 478 callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0); 479 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc ); 480 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc ); 481 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc ); 482 483 /* Attach Hardware Abstraction Layer. */ 484 hal = iwn_hal_attach(sc); 485 if (hal == NULL) { 486 error = ENXIO; /* XXX: Wrong error code? */ 487 goto fail; 488 } 489 490 error = iwn_hw_prepare(sc); 491 if (error != 0) { 492 device_printf(dev, "hardware not ready, error %d\n", error); 493 goto fail; 494 } 495 496 /* Allocate DMA memory for firmware transfers. */ 497 error = iwn_alloc_fwmem(sc); 498 if (error != 0) { 499 device_printf(dev, 500 "could not allocate memory for firmware, error %d\n", 501 error); 502 goto fail; 503 } 504 505 /* Allocate "Keep Warm" page. */ 506 error = iwn_alloc_kw(sc); 507 if (error != 0) { 508 device_printf(dev, 509 "could not allocate \"Keep Warm\" page, error %d\n", error); 510 goto fail; 511 } 512 513 /* Allocate ICT table for 5000 Series. */ 514 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 515 (error = iwn_alloc_ict(sc)) != 0) { 516 device_printf(dev, 517 "%s: could not allocate ICT table, error %d\n", 518 __func__, error); 519 goto fail; 520 } 521 522 /* Allocate TX scheduler "rings". */ 523 error = iwn_alloc_sched(sc); 524 if (error != 0) { 525 device_printf(dev, 526 "could not allocate TX scheduler rings, error %d\n", 527 error); 528 goto fail; 529 } 530 531 /* Allocate TX rings (16 on 4965AGN, 20 on 5000). */ 532 for (i = 0; i < hal->ntxqs; i++) { 533 error = iwn_alloc_tx_ring(sc, &sc->txq[i], i); 534 if (error != 0) { 535 device_printf(dev, 536 "could not allocate Tx ring %d, error %d\n", 537 i, error); 538 goto fail; 539 } 540 } 541 542 /* Allocate RX ring. 
*/ 543 error = iwn_alloc_rx_ring(sc, &sc->rxq); 544 if (error != 0 ){ 545 device_printf(dev, 546 "could not allocate Rx ring, error %d\n", error); 547 goto fail; 548 } 549 550 /* Clear pending interrupts. */ 551 IWN_WRITE(sc, IWN_INT, 0xffffffff); 552 553 /* Count the number of available chains. */ 554 sc->ntxchains = 555 ((sc->txchainmask >> 2) & 1) + 556 ((sc->txchainmask >> 1) & 1) + 557 ((sc->txchainmask >> 0) & 1); 558 sc->nrxchains = 559 ((sc->rxchainmask >> 2) & 1) + 560 ((sc->rxchainmask >> 1) & 1) + 561 ((sc->rxchainmask >> 0) & 1); 562 563 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 564 if (ifp == NULL) { 565 device_printf(dev, "can not allocate ifnet structure\n"); 566 goto fail; 567 } 568 ic = ifp->if_l2com; 569 570 ic->ic_ifp = ifp; 571 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 572 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 573 574 /* Set device capabilities. */ 575 ic->ic_caps = 576 IEEE80211_C_STA /* station mode supported */ 577 | IEEE80211_C_MONITOR /* monitor mode supported */ 578 | IEEE80211_C_TXPMGT /* tx power management */ 579 | IEEE80211_C_SHSLOT /* short slot time supported */ 580 | IEEE80211_C_WPA 581 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 582 | IEEE80211_C_BGSCAN /* background scanning */ 583 #if 0 584 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 585 #endif 586 | IEEE80211_C_WME /* WME */ 587 ; 588 #if 0 /* HT */ 589 /* XXX disable until HT channel setup works */ 590 ic->ic_htcaps = 591 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */ 592 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */ 593 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 594 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 595 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */ 596 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 597 /* s/w capabilities */ 598 | IEEE80211_HTC_HT /* HT operation */ 599 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 600 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 601 ; 602 603 /* Set HT capabilities. */ 604 ic->ic_htcaps = 605 #if IWN_RBUF_SIZE == 8192 606 IEEE80211_HTCAP_AMSDU7935 | 607 #endif 608 IEEE80211_HTCAP_CBW20_40 | 609 IEEE80211_HTCAP_SGI20 | 610 IEEE80211_HTCAP_SGI40; 611 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 612 ic->ic_htcaps |= IEEE80211_HTCAP_GF; 613 if (sc->hw_type == IWN_HW_REV_TYPE_6050) 614 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN; 615 else 616 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS; 617 #endif 618 619 /* Read MAC address, channels, etc from EEPROM. */ 620 error = iwn_read_eeprom(sc, macaddr); 621 if (error != 0) { 622 device_printf(dev, "could not read EEPROM, error %d\n", 623 error); 624 goto fail; 625 } 626 627 device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n", 628 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 629 macaddr, ":"); 630 631 #if 0 /* HT */ 632 /* Set supported HT rates. 
*/ 633 ic->ic_sup_mcs[0] = 0xff; 634 if (sc->nrxchains > 1) 635 ic->ic_sup_mcs[1] = 0xff; 636 if (sc->nrxchains > 2) 637 ic->ic_sup_mcs[2] = 0xff; 638 #endif 639 640 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 641 ifp->if_softc = sc; 642 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 643 ifp->if_init = iwn_init; 644 ifp->if_ioctl = iwn_ioctl; 645 ifp->if_start = iwn_start; 646 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 647 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 648 IFQ_SET_READY(&ifp->if_snd); 649 650 ieee80211_ifattach(ic, macaddr); 651 ic->ic_vap_create = iwn_vap_create; 652 ic->ic_vap_delete = iwn_vap_delete; 653 ic->ic_raw_xmit = iwn_raw_xmit; 654 ic->ic_node_alloc = iwn_node_alloc; 655 ic->ic_newassoc = iwn_newassoc; 656 ic->ic_wme.wme_update = iwn_wme_update; 657 ic->ic_update_mcast = iwn_update_mcast; 658 ic->ic_scan_start = iwn_scan_start; 659 ic->ic_scan_end = iwn_scan_end; 660 ic->ic_set_channel = iwn_set_channel; 661 ic->ic_scan_curchan = iwn_scan_curchan; 662 ic->ic_scan_mindwell = iwn_scan_mindwell; 663 ic->ic_setregdomain = iwn_setregdomain; 664 #if 0 /* HT */ 665 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 666 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 667 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start; 668 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop; 669 #endif 670 671 iwn_radiotap_attach(sc); 672 iwn_sysctlattach(sc); 673 674 /* 675 * Hook our interrupt after all initialization is complete. 676 */ 677 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 678 NULL, iwn_intr, sc, &sc->sc_ih); 679 if (error != 0) { 680 device_printf(dev, "could not set up interrupt, error %d\n", 681 error); 682 goto fail; 683 } 684 685 ieee80211_announce(ic); 686 return 0; 687 fail: 688 iwn_cleanup(dev); 689 return error; 690 } 691 692 static const struct iwn_hal * 693 iwn_hal_attach(struct iwn_softc *sc) 694 { 695 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf; 696 697 switch (sc->hw_type) { 698 case IWN_HW_REV_TYPE_4965: 699 sc->sc_hal = &iwn4965_hal; 700 sc->limits = &iwn4965_sensitivity_limits; 701 sc->fwname = "iwn4965fw"; 702 sc->txchainmask = IWN_ANT_AB; 703 sc->rxchainmask = IWN_ANT_ABC; 704 break; 705 case IWN_HW_REV_TYPE_5100: 706 sc->sc_hal = &iwn5000_hal; 707 sc->limits = &iwn5000_sensitivity_limits; 708 sc->fwname = "iwn5000fw"; 709 sc->txchainmask = IWN_ANT_B; 710 sc->rxchainmask = IWN_ANT_AB; 711 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 712 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC | 713 IWN_CALIB_BASE_BAND; 714 break; 715 case IWN_HW_REV_TYPE_5150: 716 sc->sc_hal = &iwn5000_hal; 717 sc->limits = &iwn5150_sensitivity_limits; 718 sc->fwname = "iwn5150fw"; 719 sc->txchainmask = IWN_ANT_A; 720 sc->rxchainmask = IWN_ANT_AB; 721 sc->calib_init = IWN_CALIB_DC | IWN_CALIB_LO | 722 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 723 break; 724 case IWN_HW_REV_TYPE_5300: 725 case IWN_HW_REV_TYPE_5350: 726 sc->sc_hal = &iwn5000_hal; 727 sc->limits = &iwn5000_sensitivity_limits; 728 sc->fwname = "iwn5000fw"; 729 sc->txchainmask = IWN_ANT_ABC; 730 sc->rxchainmask = IWN_ANT_ABC; 731 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 732 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC | 733 IWN_CALIB_BASE_BAND; 734 break; 735 case IWN_HW_REV_TYPE_1000: 736 sc->sc_hal = &iwn5000_hal; 737 sc->limits = &iwn1000_sensitivity_limits; 738 sc->fwname = "iwn1000fw"; 739 sc->txchainmask = IWN_ANT_A; 740 sc->rxchainmask = IWN_ANT_AB; 741 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 742 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC | 743 IWN_CALIB_BASE_BAND; 744 break; 
745 case IWN_HW_REV_TYPE_6000: 746 sc->sc_hal = &iwn5000_hal; 747 sc->limits = &iwn6000_sensitivity_limits; 748 sc->fwname = "iwn6000fw"; 749 switch (pci_get_device(sc->sc_dev)) { 750 case 0x422C: 751 case 0x4239: 752 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 753 sc->txchainmask = IWN_ANT_BC; 754 sc->rxchainmask = IWN_ANT_BC; 755 break; 756 default: 757 sc->txchainmask = IWN_ANT_ABC; 758 sc->rxchainmask = IWN_ANT_ABC; 759 break; 760 } 761 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 762 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 763 break; 764 case IWN_HW_REV_TYPE_6050: 765 sc->sc_hal = &iwn5000_hal; 766 sc->limits = &iwn6000_sensitivity_limits; 767 sc->fwname = "iwn6050fw"; 768 sc->txchainmask = IWN_ANT_AB; 769 sc->rxchainmask = IWN_ANT_AB; 770 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_DC | IWN_CALIB_LO | 771 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 772 break; 773 case IWN_HW_REV_TYPE_6005: 774 sc->sc_hal = &iwn5000_hal; 775 sc->limits = &iwn6000_sensitivity_limits; 776 sc->fwname = "iwn6005fw"; 777 sc->txchainmask = IWN_ANT_AB; 778 sc->rxchainmask = IWN_ANT_AB; 779 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 780 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 781 break; 782 default: 783 device_printf(sc->sc_dev, "adapter type %d not supported\n", 784 sc->hw_type); 785 return NULL; 786 } 787 return sc->sc_hal; 788 } 789 790 /* 791 * Attach the interface to 802.11 radiotap. 792 */ 793 static void 794 iwn_radiotap_attach(struct iwn_softc *sc) 795 { 796 struct ifnet *ifp = sc->sc_ifp; 797 struct ieee80211com *ic = ifp->if_l2com; 798 799 ieee80211_radiotap_attach(ic, 800 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 801 IWN_TX_RADIOTAP_PRESENT, 802 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 803 IWN_RX_RADIOTAP_PRESENT); 804 } 805 806 static struct ieee80211vap * 807 iwn_vap_create(struct ieee80211com *ic, 808 const char name[IFNAMSIZ], int unit, int opmode, int flags, 809 const uint8_t bssid[IEEE80211_ADDR_LEN], 810 const uint8_t mac[IEEE80211_ADDR_LEN]) 811 { 812 struct iwn_vap *ivp; 813 struct ieee80211vap *vap; 814 815 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 816 return NULL; 817 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), 818 M_80211_VAP, M_NOWAIT | M_ZERO); 819 if (ivp == NULL) 820 return NULL; 821 vap = &ivp->iv_vap; 822 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); 823 vap->iv_bmissthreshold = 10; /* override default */ 824 /* Override with driver methods. */ 825 ivp->iv_newstate = vap->iv_newstate; 826 vap->iv_newstate = iwn_newstate; 827 828 ieee80211_ratectl_init(vap); 829 /* Complete setup. */ 830 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); 831 ic->ic_opmode = opmode; 832 return vap; 833 } 834 835 static void 836 iwn_vap_delete(struct ieee80211vap *vap) 837 { 838 struct iwn_vap *ivp = IWN_VAP(vap); 839 840 ieee80211_ratectl_deinit(vap); 841 ieee80211_vap_detach(vap); 842 free(ivp, M_80211_VAP); 843 } 844 845 static int 846 iwn_cleanup(device_t dev) 847 { 848 struct iwn_softc *sc = device_get_softc(dev); 849 struct ifnet *ifp = sc->sc_ifp; 850 struct ieee80211com *ic; 851 int i; 852 853 if (ifp != NULL) { 854 ic = ifp->if_l2com; 855 856 ieee80211_draintask(ic, &sc->sc_reinit_task); 857 ieee80211_draintask(ic, &sc->sc_radioon_task); 858 ieee80211_draintask(ic, &sc->sc_radiooff_task); 859 860 iwn_stop(sc); 861 callout_drain(&sc->sc_timer_to); 862 ieee80211_ifdetach(ic); 863 } 864 865 iwn5000_free_calib_results(sc); 866 867 /* Free DMA resources. 
*/ 868 iwn_free_rx_ring(sc, &sc->rxq); 869 if (sc->sc_hal != NULL) 870 for (i = 0; i < sc->sc_hal->ntxqs; i++) 871 iwn_free_tx_ring(sc, &sc->txq[i]); 872 iwn_free_sched(sc); 873 iwn_free_kw(sc); 874 if (sc->ict != NULL) 875 iwn_free_ict(sc); 876 iwn_free_fwmem(sc); 877 878 if (sc->irq != NULL) { 879 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 880 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); 881 if (sc->irq_rid == 1) 882 pci_release_msi(dev); 883 } 884 885 if (sc->mem != NULL) 886 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); 887 888 if (ifp != NULL) 889 if_free(ifp); 890 891 IWN_LOCK_DESTROY(sc); 892 return 0; 893 } 894 895 static int 896 iwn_detach(device_t dev) 897 { 898 iwn_cleanup(dev); 899 return 0; 900 } 901 902 static int 903 iwn_nic_lock(struct iwn_softc *sc) 904 { 905 int ntries; 906 907 /* Request exclusive access to NIC. */ 908 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 909 910 /* Spin until we actually get the lock. */ 911 for (ntries = 0; ntries < 1000; ntries++) { 912 if ((IWN_READ(sc, IWN_GP_CNTRL) & 913 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 914 IWN_GP_CNTRL_MAC_ACCESS_ENA) 915 return 0; 916 DELAY(10); 917 } 918 return ETIMEDOUT; 919 } 920 921 static __inline void 922 iwn_nic_unlock(struct iwn_softc *sc) 923 { 924 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 925 } 926 927 static __inline uint32_t 928 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 929 { 930 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 931 IWN_BARRIER_READ_WRITE(sc); 932 return IWN_READ(sc, IWN_PRPH_RDATA); 933 } 934 935 static __inline void 936 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 937 { 938 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 939 IWN_BARRIER_WRITE(sc); 940 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 941 } 942 943 static __inline void 944 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 945 { 946 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 947 } 948 949 static __inline void 950 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 951 { 952 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 953 } 954 955 static __inline void 956 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 957 const uint32_t *data, int count) 958 { 959 for (; count > 0; count--, data++, addr += 4) 960 iwn_prph_write(sc, addr, *data); 961 } 962 963 static __inline uint32_t 964 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 965 { 966 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 967 IWN_BARRIER_READ_WRITE(sc); 968 return IWN_READ(sc, IWN_MEM_RDATA); 969 } 970 971 static __inline void 972 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 973 { 974 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 975 IWN_BARRIER_WRITE(sc); 976 IWN_WRITE(sc, IWN_MEM_WDATA, data); 977 } 978 979 static __inline void 980 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) 981 { 982 uint32_t tmp; 983 984 tmp = iwn_mem_read(sc, addr & ~3); 985 if (addr & 3) 986 tmp = (tmp & 0x0000ffff) | data << 16; 987 else 988 tmp = (tmp & 0xffff0000) | data; 989 iwn_mem_write(sc, addr & ~3, tmp); 990 } 991 992 static __inline void 993 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 994 int count) 995 { 996 for (; count > 0; count--, addr += 4) 997 *data++ = iwn_mem_read(sc, addr); 998 } 999 1000 static __inline void 1001 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1002 int count) 1003 { 1004 for (; count > 0; 
count--, addr += 4) 1005 iwn_mem_write(sc, addr, val); 1006 } 1007 1008 static int 1009 iwn_eeprom_lock(struct iwn_softc *sc) 1010 { 1011 int i, ntries; 1012 1013 for (i = 0; i < 100; i++) { 1014 /* Request exclusive access to EEPROM. */ 1015 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1016 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1017 1018 /* Spin until we actually get the lock. */ 1019 for (ntries = 0; ntries < 100; ntries++) { 1020 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1021 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1022 return 0; 1023 DELAY(10); 1024 } 1025 } 1026 return ETIMEDOUT; 1027 } 1028 1029 static __inline void 1030 iwn_eeprom_unlock(struct iwn_softc *sc) 1031 { 1032 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1033 } 1034 1035 /* 1036 * Initialize access by host to One Time Programmable ROM. 1037 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1038 */ 1039 static int 1040 iwn_init_otprom(struct iwn_softc *sc) 1041 { 1042 uint16_t prev, base, next; 1043 int count, error; 1044 1045 /* Wait for clock stabilization before accessing prph. */ 1046 error = iwn_clock_wait(sc); 1047 if (error != 0) 1048 return error; 1049 1050 error = iwn_nic_lock(sc); 1051 if (error != 0) 1052 return error; 1053 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1054 DELAY(5); 1055 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1056 iwn_nic_unlock(sc); 1057 1058 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1059 if (sc->hw_type != IWN_HW_REV_TYPE_1000) { 1060 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1061 IWN_RESET_LINK_PWR_MGMT_DIS); 1062 } 1063 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1064 /* Clear ECC status. */ 1065 IWN_SETBITS(sc, IWN_OTP_GP, 1066 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1067 1068 /* 1069 * Find the block before last block (contains the EEPROM image) 1070 * for HW without OTP shadow RAM. 1071 */ 1072 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 1073 /* Switch to absolute addressing mode. */ 1074 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1075 base = prev = 0; 1076 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) { 1077 error = iwn_read_prom_data(sc, base, &next, 2); 1078 if (error != 0) 1079 return error; 1080 if (next == 0) /* End of linked-list. */ 1081 break; 1082 prev = base; 1083 base = le16toh(next); 1084 } 1085 if (count == 0 || count == IWN1000_OTP_NBLOCKS) 1086 return EIO; 1087 /* Skip "next" word. */ 1088 sc->prom_base = prev + 1; 1089 } 1090 return 0; 1091 } 1092 1093 static int 1094 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1095 { 1096 uint32_t val, tmp; 1097 int ntries; 1098 uint8_t *out = data; 1099 1100 addr += sc->prom_base; 1101 for (; count > 0; count -= 2, addr++) { 1102 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1103 for (ntries = 0; ntries < 10; ntries++) { 1104 val = IWN_READ(sc, IWN_EEPROM); 1105 if (val & IWN_EEPROM_READ_VALID) 1106 break; 1107 DELAY(5); 1108 } 1109 if (ntries == 10) { 1110 device_printf(sc->sc_dev, 1111 "timeout reading ROM at 0x%x\n", addr); 1112 return ETIMEDOUT; 1113 } 1114 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1115 /* OTPROM, check for ECC errors. */ 1116 tmp = IWN_READ(sc, IWN_OTP_GP); 1117 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1118 device_printf(sc->sc_dev, 1119 "OTPROM ECC error at 0x%x\n", addr); 1120 return EIO; 1121 } 1122 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1123 /* Correctable ECC error, clear bit. 
*/ 1124 IWN_SETBITS(sc, IWN_OTP_GP, 1125 IWN_OTP_GP_ECC_CORR_STTS); 1126 } 1127 } 1128 *out++ = val >> 16; 1129 if (count > 1) 1130 *out++ = val >> 24; 1131 } 1132 return 0; 1133 } 1134 1135 static void 1136 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1137 { 1138 if (error != 0) 1139 return; 1140 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1141 *(bus_addr_t *)arg = segs[0].ds_addr; 1142 } 1143 1144 static int 1145 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1146 void **kvap, bus_size_t size, bus_size_t alignment, int flags) 1147 { 1148 int error; 1149 1150 dma->size = size; 1151 dma->tag = NULL; 1152 1153 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1154 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1155 1, size, flags, NULL, NULL, &dma->tag); 1156 if (error != 0) { 1157 device_printf(sc->sc_dev, 1158 "%s: bus_dma_tag_create failed, error %d\n", 1159 __func__, error); 1160 goto fail; 1161 } 1162 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1163 flags | BUS_DMA_ZERO, &dma->map); 1164 if (error != 0) { 1165 device_printf(sc->sc_dev, 1166 "%s: bus_dmamem_alloc failed, error %d\n", __func__, error); 1167 goto fail; 1168 } 1169 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, 1170 size, iwn_dma_map_addr, &dma->paddr, flags); 1171 if (error != 0) { 1172 device_printf(sc->sc_dev, 1173 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1174 goto fail; 1175 } 1176 1177 if (kvap != NULL) 1178 *kvap = dma->vaddr; 1179 return 0; 1180 fail: 1181 iwn_dma_contig_free(dma); 1182 return error; 1183 } 1184 1185 static void 1186 iwn_dma_contig_free(struct iwn_dma_info *dma) 1187 { 1188 if (dma->tag != NULL) { 1189 if (dma->map != NULL) { 1190 if (dma->paddr == 0) { 1191 bus_dmamap_sync(dma->tag, dma->map, 1192 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1193 bus_dmamap_unload(dma->tag, dma->map); 1194 } 1195 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map); 1196 } 1197 bus_dma_tag_destroy(dma->tag); 1198 } 1199 } 1200 1201 static int 1202 iwn_alloc_sched(struct iwn_softc *sc) 1203 { 1204 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1205 return iwn_dma_contig_alloc(sc, &sc->sched_dma, 1206 (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT); 1207 } 1208 1209 static void 1210 iwn_free_sched(struct iwn_softc *sc) 1211 { 1212 iwn_dma_contig_free(&sc->sched_dma); 1213 } 1214 1215 static int 1216 iwn_alloc_kw(struct iwn_softc *sc) 1217 { 1218 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1219 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096, 1220 BUS_DMA_NOWAIT); 1221 } 1222 1223 static void 1224 iwn_free_kw(struct iwn_softc *sc) 1225 { 1226 iwn_dma_contig_free(&sc->kw_dma); 1227 } 1228 1229 static int 1230 iwn_alloc_ict(struct iwn_softc *sc) 1231 { 1232 /* ICT table must be aligned on a 4KB boundary. */ 1233 return iwn_dma_contig_alloc(sc, &sc->ict_dma, 1234 (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT); 1235 } 1236 1237 static void 1238 iwn_free_ict(struct iwn_softc *sc) 1239 { 1240 iwn_dma_contig_free(&sc->ict_dma); 1241 } 1242 1243 static int 1244 iwn_alloc_fwmem(struct iwn_softc *sc) 1245 { 1246 /* Must be aligned on a 16-byte boundary. 
*/ 1247 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, 1248 sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT); 1249 } 1250 1251 static void 1252 iwn_free_fwmem(struct iwn_softc *sc) 1253 { 1254 iwn_dma_contig_free(&sc->fw_dma); 1255 } 1256 1257 static int 1258 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1259 { 1260 bus_size_t size; 1261 int i, error; 1262 1263 ring->cur = 0; 1264 1265 /* Allocate RX descriptors (256-byte aligned). */ 1266 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1267 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, 1268 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT); 1269 if (error != 0) { 1270 device_printf(sc->sc_dev, 1271 "%s: could not allocate Rx ring DMA memory, error %d\n", 1272 __func__, error); 1273 goto fail; 1274 } 1275 1276 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1277 BUS_SPACE_MAXADDR_32BIT, 1278 BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, 1279 MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); 1280 if (error != 0) { 1281 device_printf(sc->sc_dev, 1282 "%s: bus_dma_tag_create_failed, error %d\n", 1283 __func__, error); 1284 goto fail; 1285 } 1286 1287 /* Allocate RX status area (16-byte aligned). */ 1288 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, 1289 (void **)&ring->stat, sizeof (struct iwn_rx_status), 1290 16, BUS_DMA_NOWAIT); 1291 if (error != 0) { 1292 device_printf(sc->sc_dev, 1293 "%s: could not allocate Rx status DMA memory, error %d\n", 1294 __func__, error); 1295 goto fail; 1296 } 1297 1298 /* 1299 * Allocate and map RX buffers. 1300 */ 1301 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1302 struct iwn_rx_data *data = &ring->data[i]; 1303 bus_addr_t paddr; 1304 1305 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1306 if (error != 0) { 1307 device_printf(sc->sc_dev, 1308 "%s: bus_dmamap_create failed, error %d\n", 1309 __func__, error); 1310 goto fail; 1311 } 1312 1313 data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1314 if (data->m == NULL) { 1315 device_printf(sc->sc_dev, 1316 "%s: could not allocate rx mbuf\n", __func__); 1317 error = ENOMEM; 1318 goto fail; 1319 } 1320 1321 /* Map page. */ 1322 error = bus_dmamap_load(ring->data_dmat, data->map, 1323 mtod(data->m, caddr_t), MJUMPAGESIZE, 1324 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1325 if (error != 0 && error != EFBIG) { 1326 device_printf(sc->sc_dev, 1327 "%s: bus_dmamap_load failed, error %d\n", 1328 __func__, error); 1329 m_freem(data->m); 1330 error = ENOMEM; /* XXX unique code */ 1331 goto fail; 1332 } 1333 bus_dmamap_sync(ring->data_dmat, data->map, 1334 BUS_DMASYNC_PREWRITE); 1335 1336 /* Set physical address of RX buffer (256-byte aligned). 
*/ 1337 ring->desc[i] = htole32(paddr >> 8); 1338 } 1339 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1340 BUS_DMASYNC_PREWRITE); 1341 return 0; 1342 fail: 1343 iwn_free_rx_ring(sc, ring); 1344 return error; 1345 } 1346 1347 static void 1348 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1349 { 1350 int ntries; 1351 1352 if (iwn_nic_lock(sc) == 0) { 1353 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1354 for (ntries = 0; ntries < 1000; ntries++) { 1355 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1356 IWN_FH_RX_STATUS_IDLE) 1357 break; 1358 DELAY(10); 1359 } 1360 iwn_nic_unlock(sc); 1361 #ifdef IWN_DEBUG 1362 if (ntries == 1000) 1363 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 1364 "timeout resetting Rx ring"); 1365 #endif 1366 } 1367 ring->cur = 0; 1368 sc->last_rx_valid = 0; 1369 } 1370 1371 static void 1372 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1373 { 1374 int i; 1375 1376 iwn_dma_contig_free(&ring->desc_dma); 1377 iwn_dma_contig_free(&ring->stat_dma); 1378 1379 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1380 struct iwn_rx_data *data = &ring->data[i]; 1381 1382 if (data->m != NULL) { 1383 bus_dmamap_sync(ring->data_dmat, data->map, 1384 BUS_DMASYNC_POSTREAD); 1385 bus_dmamap_unload(ring->data_dmat, data->map); 1386 m_freem(data->m); 1387 } 1388 if (data->map != NULL) 1389 bus_dmamap_destroy(ring->data_dmat, data->map); 1390 } 1391 } 1392 1393 static int 1394 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1395 { 1396 bus_size_t size; 1397 bus_addr_t paddr; 1398 int i, error; 1399 1400 ring->qid = qid; 1401 ring->queued = 0; 1402 ring->cur = 0; 1403 1404 /* Allocate TX descriptors (256-byte aligned.) */ 1405 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc); 1406 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, 1407 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT); 1408 if (error != 0) { 1409 device_printf(sc->sc_dev, 1410 "%s: could not allocate TX ring DMA memory, error %d\n", 1411 __func__, error); 1412 goto fail; 1413 } 1414 1415 /* 1416 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1417 * to allocate commands space for other rings. 
1418 */ 1419 if (qid > 4) 1420 return 0; 1421 1422 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd); 1423 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, 1424 (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT); 1425 if (error != 0) { 1426 device_printf(sc->sc_dev, 1427 "%s: could not allocate TX cmd DMA memory, error %d\n", 1428 __func__, error); 1429 goto fail; 1430 } 1431 1432 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1433 BUS_SPACE_MAXADDR_32BIT, 1434 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1, 1435 MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); 1436 if (error != 0) { 1437 device_printf(sc->sc_dev, 1438 "%s: bus_dma_tag_create_failed, error %d\n", 1439 __func__, error); 1440 goto fail; 1441 } 1442 1443 paddr = ring->cmd_dma.paddr; 1444 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1445 struct iwn_tx_data *data = &ring->data[i]; 1446 1447 data->cmd_paddr = paddr; 1448 data->scratch_paddr = paddr + 12; 1449 paddr += sizeof (struct iwn_tx_cmd); 1450 1451 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1452 if (error != 0) { 1453 device_printf(sc->sc_dev, 1454 "%s: bus_dmamap_create failed, error %d\n", 1455 __func__, error); 1456 goto fail; 1457 } 1458 bus_dmamap_sync(ring->data_dmat, data->map, 1459 BUS_DMASYNC_PREWRITE); 1460 } 1461 return 0; 1462 fail: 1463 iwn_free_tx_ring(sc, ring); 1464 return error; 1465 } 1466 1467 static void 1468 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1469 { 1470 int i; 1471 1472 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1473 struct iwn_tx_data *data = &ring->data[i]; 1474 1475 if (data->m != NULL) { 1476 bus_dmamap_unload(ring->data_dmat, data->map); 1477 m_freem(data->m); 1478 data->m = NULL; 1479 } 1480 } 1481 /* Clear TX descriptors. */ 1482 memset(ring->desc, 0, ring->desc_dma.size); 1483 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1484 BUS_DMASYNC_PREWRITE); 1485 sc->qfullmsk &= ~(1 << ring->qid); 1486 ring->queued = 0; 1487 ring->cur = 0; 1488 } 1489 1490 static void 1491 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1492 { 1493 int i; 1494 1495 iwn_dma_contig_free(&ring->desc_dma); 1496 iwn_dma_contig_free(&ring->cmd_dma); 1497 1498 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1499 struct iwn_tx_data *data = &ring->data[i]; 1500 1501 if (data->m != NULL) { 1502 bus_dmamap_sync(ring->data_dmat, data->map, 1503 BUS_DMASYNC_POSTWRITE); 1504 bus_dmamap_unload(ring->data_dmat, data->map); 1505 m_freem(data->m); 1506 } 1507 if (data->map != NULL) 1508 bus_dmamap_destroy(ring->data_dmat, data->map); 1509 } 1510 } 1511 1512 static void 1513 iwn5000_ict_reset(struct iwn_softc *sc) 1514 { 1515 /* Disable interrupts. */ 1516 IWN_WRITE(sc, IWN_INT_MASK, 0); 1517 1518 /* Reset ICT table. */ 1519 memset(sc->ict, 0, IWN_ICT_SIZE); 1520 sc->ict_cur = 0; 1521 1522 /* Set physical address of ICT table (4KB aligned.) */ 1523 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); 1524 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 1525 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 1526 1527 /* Enable periodic RX interrupt. */ 1528 sc->int_mask |= IWN_INT_RX_PERIODIC; 1529 /* Switch to ICT interrupt mode in driver. */ 1530 sc->sc_flags |= IWN_FLAG_USE_ICT; 1531 1532 /* Re-enable interrupts. 
*/ 1533 IWN_WRITE(sc, IWN_INT, 0xffffffff); 1534 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 1535 } 1536 1537 static int 1538 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1539 { 1540 const struct iwn_hal *hal = sc->sc_hal; 1541 int error; 1542 uint16_t val; 1543 1544 /* Check whether adapter has an EEPROM or an OTPROM. */ 1545 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1546 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1547 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1548 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", 1549 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); 1550 1551 /* Adapter has to be powered on for EEPROM access to work. */ 1552 error = iwn_apm_init(sc); 1553 if (error != 0) { 1554 device_printf(sc->sc_dev, 1555 "%s: could not power ON adapter, error %d\n", 1556 __func__, error); 1557 return error; 1558 } 1559 1560 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1561 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); 1562 return EIO; 1563 } 1564 error = iwn_eeprom_lock(sc); 1565 if (error != 0) { 1566 device_printf(sc->sc_dev, 1567 "%s: could not lock ROM, error %d\n", 1568 __func__, error); 1569 return error; 1570 } 1571 1572 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1573 error = iwn_init_otprom(sc); 1574 if (error != 0) { 1575 device_printf(sc->sc_dev, 1576 "%s: could not initialize OTPROM, error %d\n", 1577 __func__, error); 1578 return error; 1579 } 1580 } 1581 1582 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1583 sc->rfcfg = le16toh(val); 1584 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); 1585 1586 /* Read MAC address. */ 1587 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); 1588 1589 /* Read adapter-specific information from EEPROM. */ 1590 hal->read_eeprom(sc); 1591 1592 iwn_apm_stop(sc); /* Power OFF adapter. */ 1593 1594 iwn_eeprom_unlock(sc); 1595 return 0; 1596 } 1597 1598 static void 1599 iwn4965_read_eeprom(struct iwn_softc *sc) 1600 { 1601 uint32_t addr; 1602 int i; 1603 uint16_t val; 1604 1605 /* Read regulatory domain (4 ASCII characters.) */ 1606 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1607 1608 /* Read the list of authorized channels (20MHz ones only.) */ 1609 for (i = 0; i < 5; i++) { 1610 addr = iwn4965_regulatory_bands[i]; 1611 iwn_read_eeprom_channels(sc, i, addr); 1612 } 1613 1614 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1615 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1616 sc->maxpwr2GHz = val & 0xff; 1617 sc->maxpwr5GHz = val >> 8; 1618 /* Check that EEPROM values are within valid range. */ 1619 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1620 sc->maxpwr5GHz = 38; 1621 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1622 sc->maxpwr2GHz = 38; 1623 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", 1624 sc->maxpwr2GHz, sc->maxpwr5GHz); 1625 1626 /* Read samples for each TX power group. */ 1627 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1628 sizeof sc->bands); 1629 1630 /* Read voltage at which samples were taken. */ 1631 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1632 sc->eeprom_voltage = (int16_t)le16toh(val); 1633 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", 1634 sc->eeprom_voltage); 1635 1636 #ifdef IWN_DEBUG 1637 /* Print samples. 
*/ 1638 if (sc->sc_debug & IWN_DEBUG_ANY) { 1639 for (i = 0; i < IWN_NBANDS; i++) 1640 iwn4965_print_power_group(sc, i); 1641 } 1642 #endif 1643 } 1644 1645 #ifdef IWN_DEBUG 1646 static void 1647 iwn4965_print_power_group(struct iwn_softc *sc, int i) 1648 { 1649 struct iwn4965_eeprom_band *band = &sc->bands[i]; 1650 struct iwn4965_eeprom_chan_samples *chans = band->chans; 1651 int j, c; 1652 1653 printf("===band %d===\n", i); 1654 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 1655 printf("chan1 num=%d\n", chans[0].num); 1656 for (c = 0; c < 2; c++) { 1657 for (j = 0; j < IWN_NSAMPLES; j++) { 1658 printf("chain %d, sample %d: temp=%d gain=%d " 1659 "power=%d pa_det=%d\n", c, j, 1660 chans[0].samples[c][j].temp, 1661 chans[0].samples[c][j].gain, 1662 chans[0].samples[c][j].power, 1663 chans[0].samples[c][j].pa_det); 1664 } 1665 } 1666 printf("chan2 num=%d\n", chans[1].num); 1667 for (c = 0; c < 2; c++) { 1668 for (j = 0; j < IWN_NSAMPLES; j++) { 1669 printf("chain %d, sample %d: temp=%d gain=%d " 1670 "power=%d pa_det=%d\n", c, j, 1671 chans[1].samples[c][j].temp, 1672 chans[1].samples[c][j].gain, 1673 chans[1].samples[c][j].power, 1674 chans[1].samples[c][j].pa_det); 1675 } 1676 } 1677 } 1678 #endif 1679 1680 static void 1681 iwn5000_read_eeprom(struct iwn_softc *sc) 1682 { 1683 struct iwn5000_eeprom_calib_hdr hdr; 1684 int32_t temp, volt; 1685 uint32_t addr, base; 1686 int i; 1687 uint16_t val; 1688 1689 /* Read regulatory domain (4 ASCII characters.) */ 1690 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1691 base = le16toh(val); 1692 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 1693 sc->eeprom_domain, 4); 1694 1695 /* Read the list of authorized channels (20MHz ones only.) */ 1696 for (i = 0; i < 5; i++) { 1697 addr = base + iwn5000_regulatory_bands[i]; 1698 iwn_read_eeprom_channels(sc, i, addr); 1699 } 1700 1701 /* Read enhanced TX power information for 6000 Series. */ 1702 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1703 iwn_read_eeprom_enhinfo(sc); 1704 1705 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 1706 base = le16toh(val); 1707 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 1708 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 1709 "%s: calib version=%u pa type=%u voltage=%u\n", 1710 __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt)); 1711 sc->calib_ver = hdr.version; 1712 1713 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 1714 /* Compute temperature offset. */ 1715 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1716 temp = le16toh(val); 1717 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 1718 volt = le16toh(val); 1719 sc->temp_off = temp - (volt / -5); 1720 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", 1721 temp, volt, sc->temp_off); 1722 } 1723 } 1724 1725 /* 1726 * Translate EEPROM flags to net80211. 
1727 */ 1728 static uint32_t 1729 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) 1730 { 1731 uint32_t nflags; 1732 1733 nflags = 0; 1734 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 1735 nflags |= IEEE80211_CHAN_PASSIVE; 1736 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) 1737 nflags |= IEEE80211_CHAN_NOADHOC; 1738 if (channel->flags & IWN_EEPROM_CHAN_RADAR) { 1739 nflags |= IEEE80211_CHAN_DFS; 1740 /* XXX apparently IBSS may still be marked */ 1741 nflags |= IEEE80211_CHAN_NOADHOC; 1742 } 1743 1744 return nflags; 1745 } 1746 1747 static void 1748 iwn_read_eeprom_band(struct iwn_softc *sc, int n) 1749 { 1750 struct ifnet *ifp = sc->sc_ifp; 1751 struct ieee80211com *ic = ifp->if_l2com; 1752 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 1753 const struct iwn_chan_band *band = &iwn_bands[n]; 1754 struct ieee80211_channel *c; 1755 int i, chan, nflags; 1756 1757 for (i = 0; i < band->nchan; i++) { 1758 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 1759 DPRINTF(sc, IWN_DEBUG_RESET, 1760 "skip chan %d flags 0x%x maxpwr %d\n", 1761 band->chan[i], channels[i].flags, 1762 channels[i].maxpwr); 1763 continue; 1764 } 1765 chan = band->chan[i]; 1766 nflags = iwn_eeprom_channel_flags(&channels[i]); 1767 1768 DPRINTF(sc, IWN_DEBUG_RESET, 1769 "add chan %d flags 0x%x maxpwr %d\n", 1770 chan, channels[i].flags, channels[i].maxpwr); 1771 1772 c = &ic->ic_channels[ic->ic_nchans++]; 1773 c->ic_ieee = chan; 1774 c->ic_maxregpower = channels[i].maxpwr; 1775 c->ic_maxpower = 2*c->ic_maxregpower; 1776 1777 /* Save maximum allowed TX power for this channel. */ 1778 sc->maxpwr[chan] = channels[i].maxpwr; 1779 1780 if (n == 0) { /* 2GHz band */ 1781 c->ic_freq = ieee80211_ieee2mhz(chan, 1782 IEEE80211_CHAN_G); 1783 1784 /* G =>'s B is supported */ 1785 c->ic_flags = IEEE80211_CHAN_B | nflags; 1786 1787 c = &ic->ic_channels[ic->ic_nchans++]; 1788 c[0] = c[-1]; 1789 c->ic_flags = IEEE80211_CHAN_G | nflags; 1790 } else { /* 5GHz band */ 1791 c->ic_freq = ieee80211_ieee2mhz(chan, 1792 IEEE80211_CHAN_A); 1793 c->ic_flags = IEEE80211_CHAN_A | nflags; 1794 sc->sc_flags |= IWN_FLAG_HAS_5GHZ; 1795 } 1796 #if 0 /* HT */ 1797 /* XXX no constraints on using HT20 */ 1798 /* add HT20, HT40 added separately */ 1799 c = &ic->ic_channels[ic->ic_nchans++]; 1800 c[0] = c[-1]; 1801 c->ic_flags |= IEEE80211_CHAN_HT20; 1802 /* XXX NARROW =>'s 1/2 and 1/4 width? */ 1803 #endif 1804 } 1805 } 1806 1807 #if 0 /* HT */ 1808 static void 1809 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n) 1810 { 1811 struct ifnet *ifp = sc->sc_ifp; 1812 struct ieee80211com *ic = ifp->if_l2com; 1813 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 1814 const struct iwn_chan_band *band = &iwn_bands[n]; 1815 struct ieee80211_channel *c, *cent, *extc; 1816 int i; 1817 1818 for (i = 0; i < band->nchan; i++) { 1819 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) || 1820 !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) { 1821 DPRINTF(sc, IWN_DEBUG_RESET, 1822 "skip chan %d flags 0x%x maxpwr %d\n", 1823 band->chan[i], channels[i].flags, 1824 channels[i].maxpwr); 1825 continue; 1826 } 1827 /* 1828 * Each entry defines an HT40 channel pair; find the 1829 * center channel, then the extension channel above. 
1830 */ 1831 cent = ieee80211_find_channel_byieee(ic, band->chan[i], 1832 band->flags & ~IEEE80211_CHAN_HT); 1833 if (cent == NULL) { /* XXX shouldn't happen */ 1834 device_printf(sc->sc_dev, 1835 "%s: no entry for channel %d\n", 1836 __func__, band->chan[i]); 1837 continue; 1838 } 1839 extc = ieee80211_find_channel(ic, cent->ic_freq+20, 1840 band->flags & ~IEEE80211_CHAN_HT); 1841 if (extc == NULL) { 1842 DPRINTF(sc, IWN_DEBUG_RESET, 1843 "skip chan %d, extension channel not found\n", 1844 band->chan[i]); 1845 continue; 1846 } 1847 1848 DPRINTF(sc, IWN_DEBUG_RESET, 1849 "add ht40 chan %d flags 0x%x maxpwr %d\n", 1850 band->chan[i], channels[i].flags, channels[i].maxpwr); 1851 1852 c = &ic->ic_channels[ic->ic_nchans++]; 1853 c[0] = cent[0]; 1854 c->ic_extieee = extc->ic_ieee; 1855 c->ic_flags &= ~IEEE80211_CHAN_HT; 1856 c->ic_flags |= IEEE80211_CHAN_HT40U; 1857 c = &ic->ic_channels[ic->ic_nchans++]; 1858 c[0] = extc[0]; 1859 c->ic_extieee = cent->ic_ieee; 1860 c->ic_flags &= ~IEEE80211_CHAN_HT; 1861 c->ic_flags |= IEEE80211_CHAN_HT40D; 1862 } 1863 } 1864 #endif 1865 1866 static void 1867 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 1868 { 1869 struct ifnet *ifp = sc->sc_ifp; 1870 struct ieee80211com *ic = ifp->if_l2com; 1871 1872 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], 1873 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); 1874 1875 if (n < 5) 1876 iwn_read_eeprom_band(sc, n); 1877 #if 0 /* HT */ 1878 else 1879 iwn_read_eeprom_ht40(sc, n); 1880 #endif 1881 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1882 } 1883 1884 #define nitems(_a) (sizeof((_a)) / sizeof((_a)[0])) 1885 1886 static void 1887 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 1888 { 1889 struct iwn_eeprom_enhinfo enhinfo[35]; 1890 uint16_t val, base; 1891 int8_t maxpwr; 1892 int i; 1893 1894 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1895 base = le16toh(val); 1896 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 1897 enhinfo, sizeof enhinfo); 1898 1899 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr); 1900 for (i = 0; i < nitems(enhinfo); i++) { 1901 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0) 1902 continue; /* Skip invalid entries. */ 1903 1904 maxpwr = 0; 1905 if (sc->txchainmask & IWN_ANT_A) 1906 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 1907 if (sc->txchainmask & IWN_ANT_B) 1908 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 1909 if (sc->txchainmask & IWN_ANT_C) 1910 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 1911 if (sc->ntxchains == 2) 1912 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 1913 else if (sc->ntxchains == 3) 1914 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 1915 maxpwr /= 2; /* Convert half-dBm to dBm. */ 1916 1917 DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i, 1918 maxpwr); 1919 sc->enh_maxpwr[i] = maxpwr; 1920 } 1921 } 1922 1923 static struct ieee80211_node * 1924 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1925 { 1926 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 1927 } 1928 1929 static void 1930 iwn_newassoc(struct ieee80211_node *ni, int isnew) 1931 { 1932 /* XXX move */ 1933 ieee80211_ratectl_node_init(ni); 1934 } 1935 1936 static int 1937 iwn_media_change(struct ifnet *ifp) 1938 { 1939 int error = ieee80211_media_change(ifp); 1940 /* NB: only the fixed rate can change and that doesn't need a reset */ 1941 return (error == ENETRESET ? 
0 : error); 1942 } 1943 1944 static int 1945 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1946 { 1947 struct iwn_vap *ivp = IWN_VAP(vap); 1948 struct ieee80211com *ic = vap->iv_ic; 1949 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1950 int error; 1951 1952 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1953 ieee80211_state_name[vap->iv_state], 1954 ieee80211_state_name[nstate]); 1955 1956 IEEE80211_UNLOCK(ic); 1957 IWN_LOCK(sc); 1958 callout_stop(&sc->sc_timer_to); 1959 1960 switch (nstate) { 1961 case IEEE80211_S_ASSOC: 1962 if (vap->iv_state != IEEE80211_S_RUN) 1963 break; 1964 /* FALLTHROUGH */ 1965 case IEEE80211_S_AUTH: 1966 if (vap->iv_state == IEEE80211_S_AUTH) 1967 break; 1968 1969 /* 1970 * !AUTH -> AUTH transition requires state reset to handle 1971 * reassociations correctly. 1972 */ 1973 sc->rxon.associd = 0; 1974 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1975 iwn_calib_reset(sc); 1976 error = iwn_auth(sc, vap); 1977 break; 1978 1979 case IEEE80211_S_RUN: 1980 /* 1981 * RUN -> RUN transition; Just restart the timers. 1982 */ 1983 if (vap->iv_state == IEEE80211_S_RUN) { 1984 iwn_calib_reset(sc); 1985 break; 1986 } 1987 1988 /* 1989 * !RUN -> RUN requires setting the association id 1990 * which is done with a firmware cmd. We also defer 1991 * starting the timers until that work is done. 1992 */ 1993 error = iwn_run(sc, vap); 1994 break; 1995 1996 default: 1997 break; 1998 } 1999 IWN_UNLOCK(sc); 2000 IEEE80211_LOCK(ic); 2001 return ivp->iv_newstate(vap, nstate, arg); 2002 } 2003 2004 /* 2005 * Process an RX_PHY firmware notification. This is usually immediately 2006 * followed by an MPDU_RX_DONE notification. 2007 */ 2008 static void 2009 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2010 struct iwn_rx_data *data) 2011 { 2012 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2013 2014 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2015 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2016 2017 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2018 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2019 sc->last_rx_valid = 1; 2020 } 2021 2022 static void 2023 iwn_timer_timeout(void *arg) 2024 { 2025 struct iwn_softc *sc = arg; 2026 uint32_t flags = 0; 2027 2028 IWN_LOCK_ASSERT(sc); 2029 2030 if (sc->calib_cnt && --sc->calib_cnt == 0) { 2031 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2032 "send statistics request"); 2033 (void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2034 sizeof flags, 1); 2035 sc->calib_cnt = 60; /* do calibration every 60s */ 2036 } 2037 iwn_watchdog(sc); /* NB: piggyback tx watchdog */ 2038 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 2039 } 2040 2041 static void 2042 iwn_calib_reset(struct iwn_softc *sc) 2043 { 2044 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 2045 sc->calib_cnt = 60; /* do calibration every 60s */ 2046 } 2047 2048 /* 2049 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2050 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 
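 * The PHY statistics saved by iwn_rx_phy() in sc->last_rx_stat are consumed
 * here; an MPDU_RX_DONE that arrives without a preceding RX_PHY is dropped
 * and counted as an input error.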
2051 */ 2052 static void 2053 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2054 struct iwn_rx_data *data) 2055 { 2056 const struct iwn_hal *hal = sc->sc_hal; 2057 struct ifnet *ifp = sc->sc_ifp; 2058 struct ieee80211com *ic = ifp->if_l2com; 2059 struct iwn_rx_ring *ring = &sc->rxq; 2060 struct ieee80211_frame *wh; 2061 struct ieee80211_node *ni; 2062 struct mbuf *m, *m1; 2063 struct iwn_rx_stat *stat; 2064 caddr_t head; 2065 bus_addr_t paddr; 2066 uint32_t flags; 2067 int error, len, rssi, nf; 2068 2069 if (desc->type == IWN_MPDU_RX_DONE) { 2070 /* Check for prior RX_PHY notification. */ 2071 if (!sc->last_rx_valid) { 2072 DPRINTF(sc, IWN_DEBUG_ANY, 2073 "%s: missing RX_PHY\n", __func__); 2074 ifp->if_ierrors++; 2075 return; 2076 } 2077 sc->last_rx_valid = 0; 2078 stat = &sc->last_rx_stat; 2079 } else 2080 stat = (struct iwn_rx_stat *)(desc + 1); 2081 2082 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2083 2084 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2085 device_printf(sc->sc_dev, 2086 "%s: invalid rx statistic header, len %d\n", 2087 __func__, stat->cfg_phy_len); 2088 ifp->if_ierrors++; 2089 return; 2090 } 2091 if (desc->type == IWN_MPDU_RX_DONE) { 2092 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2093 head = (caddr_t)(mpdu + 1); 2094 len = le16toh(mpdu->len); 2095 } else { 2096 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2097 len = le16toh(stat->len); 2098 } 2099 2100 flags = le32toh(*(uint32_t *)(head + len)); 2101 2102 /* Discard frames with a bad FCS early. */ 2103 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2104 DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n", 2105 __func__, flags); 2106 ifp->if_ierrors++; 2107 return; 2108 } 2109 /* Discard frames that are too short. */ 2110 if (len < sizeof (*wh)) { 2111 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2112 __func__, len); 2113 ifp->if_ierrors++; 2114 return; 2115 } 2116 2117 /* XXX don't need mbuf, just dma buffer */ 2118 m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 2119 if (m1 == NULL) { 2120 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2121 __func__); 2122 ifp->if_ierrors++; 2123 return; 2124 } 2125 bus_dmamap_unload(ring->data_dmat, data->map); 2126 2127 error = bus_dmamap_load(ring->data_dmat, data->map, 2128 mtod(m1, caddr_t), MJUMPAGESIZE, 2129 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2130 if (error != 0 && error != EFBIG) { 2131 device_printf(sc->sc_dev, 2132 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2133 m_freem(m1); 2134 ifp->if_ierrors++; 2135 return; 2136 } 2137 2138 m = data->m; 2139 data->m = m1; 2140 /* Update RX descriptor. */ 2141 ring->desc[ring->cur] = htole32(paddr >> 8); 2142 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2143 BUS_DMASYNC_PREWRITE); 2144 2145 /* Finalize mbuf. */ 2146 m->m_pkthdr.rcvif = ifp; 2147 m->m_data = head; 2148 m->m_pkthdr.len = m->m_len = len; 2149 2150 rssi = hal->get_rssi(sc, stat); 2151 2152 /* Grab a reference to the source node. */ 2153 wh = mtod(m, struct ieee80211_frame *); 2154 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2155 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2156 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; 2157 2158 if (ieee80211_radiotap_active(ic)) { 2159 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2160 2161 tap->wr_tsft = htole64(stat->tstamp); 2162 tap->wr_flags = 0; 2163 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2164 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2165 switch (stat->rate) { 2166 /* CCK rates. */ 2167 case 10: tap->wr_rate = 2; break; 2168 case 20: tap->wr_rate = 4; break; 2169 case 55: tap->wr_rate = 11; break; 2170 case 110: tap->wr_rate = 22; break; 2171 /* OFDM rates. */ 2172 case 0xd: tap->wr_rate = 12; break; 2173 case 0xf: tap->wr_rate = 18; break; 2174 case 0x5: tap->wr_rate = 24; break; 2175 case 0x7: tap->wr_rate = 36; break; 2176 case 0x9: tap->wr_rate = 48; break; 2177 case 0xb: tap->wr_rate = 72; break; 2178 case 0x1: tap->wr_rate = 96; break; 2179 case 0x3: tap->wr_rate = 108; break; 2180 /* Unknown rate: should not happen. */ 2181 default: tap->wr_rate = 0; 2182 } 2183 tap->wr_dbm_antsignal = rssi; 2184 tap->wr_dbm_antnoise = nf; 2185 } 2186 2187 IWN_UNLOCK(sc); 2188 2189 /* Send the frame to the 802.11 layer. */ 2190 if (ni != NULL) { 2191 (void) ieee80211_input(ni, m, rssi - nf, nf); 2192 /* Node is no longer needed. */ 2193 ieee80211_free_node(ni); 2194 } else 2195 (void) ieee80211_input_all(ic, m, rssi - nf, nf); 2196 2197 IWN_LOCK(sc); 2198 } 2199 2200 #if 0 /* HT */ 2201 /* Process an incoming Compressed BlockAck. */ 2202 static void 2203 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2204 struct iwn_rx_data *data) 2205 { 2206 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2207 struct iwn_tx_ring *txq; 2208 2209 txq = &sc->txq[letoh16(ba->qid)]; 2210 /* XXX TBD */ 2211 } 2212 #endif 2213 2214 /* 2215 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2216 * The latter is sent by the firmware after each received beacon. 2217 */ 2218 static void 2219 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2220 struct iwn_rx_data *data) 2221 { 2222 const struct iwn_hal *hal = sc->sc_hal; 2223 struct ifnet *ifp = sc->sc_ifp; 2224 struct ieee80211com *ic = ifp->if_l2com; 2225 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2226 struct iwn_calib_state *calib = &sc->calib; 2227 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2228 int temp; 2229 2230 /* Beacon stats are meaningful only when associated and not scanning. */ 2231 if (vap->iv_state != IEEE80211_S_RUN || 2232 (ic->ic_flags & IEEE80211_F_SCAN)) 2233 return; 2234 2235 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2236 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type); 2237 iwn_calib_reset(sc); /* Reset TX power calibration timeout. */ 2238 2239 /* Test if temperature has changed. */ 2240 if (stats->general.temp != sc->rawtemp) { 2241 /* Convert "raw" temperature to degC. */ 2242 sc->rawtemp = stats->general.temp; 2243 temp = hal->get_temperature(sc); 2244 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 2245 __func__, temp); 2246 2247 /* Update TX power if need be (4965AGN only.) */ 2248 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2249 iwn4965_power_calibration(sc, temp); 2250 } 2251 2252 if (desc->type != IWN_BEACON_STATISTICS) 2253 return; /* Reply to a statistics request. */ 2254 2255 sc->noise = iwn_get_noise(&stats->rx.general); 2256 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 2257 2258 /* Test that RSSI and noise are present in stats report. 
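 * (The firmware apparently sets the general flags word to 1 when they are;
 * any other value means the report carries no usable RSSI and is ignored.)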
*/ 2259 if (le32toh(stats->rx.general.flags) != 1) { 2260 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 2261 "received statistics without RSSI"); 2262 return; 2263 } 2264 2265 if (calib->state == IWN_CALIB_STATE_ASSOC) 2266 iwn_collect_noise(sc, &stats->rx.general); 2267 else if (calib->state == IWN_CALIB_STATE_RUN) 2268 iwn_tune_sensitivity(sc, &stats->rx); 2269 } 2270 2271 /* 2272 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2273 * and 5000 adapters have different incompatible TX status formats. 2274 */ 2275 static void 2276 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2277 struct iwn_rx_data *data) 2278 { 2279 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2280 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2281 2282 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2283 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2284 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2285 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2286 le32toh(stat->status)); 2287 2288 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2289 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); 2290 } 2291 2292 static void 2293 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2294 struct iwn_rx_data *data) 2295 { 2296 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2297 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2298 2299 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2300 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2301 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2302 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2303 le32toh(stat->status)); 2304 2305 #ifdef notyet 2306 /* Reset TX scheduler slot. */ 2307 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2308 #endif 2309 2310 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2311 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); 2312 } 2313 2314 /* 2315 * Adapter-independent backend for TX_DONE firmware notifications. 2316 */ 2317 static void 2318 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2319 uint8_t status) 2320 { 2321 struct ifnet *ifp = sc->sc_ifp; 2322 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2323 struct iwn_tx_data *data = &ring->data[desc->idx]; 2324 struct mbuf *m; 2325 struct ieee80211_node *ni; 2326 struct ieee80211vap *vap; 2327 2328 KASSERT(data->ni != NULL, ("no node")); 2329 2330 /* Unmap and free mbuf. */ 2331 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2332 bus_dmamap_unload(ring->data_dmat, data->map); 2333 m = data->m, data->m = NULL; 2334 ni = data->ni, data->ni = NULL; 2335 vap = ni->ni_vap; 2336 2337 if (m->m_flags & M_TXCB) { 2338 /* 2339 * Channels marked for "radar" require traffic to be received 2340 * to unlock before we can transmit. Until traffic is seen 2341 * any attempt to transmit is returned immediately with status 2342 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 2343 * happen on first authenticate after scanning. To workaround 2344 * this we ignore a failure of this sort in AUTH state so the 2345 * 802.11 layer will fall back to using a timeout to wait for 2346 * the AUTH reply. This allows the firmware time to see 2347 * traffic so a subsequent retry of AUTH succeeds. 
It's 2348 * unclear why the firmware does not maintain state for 2349 * channels recently visited as this would allow immediate 2350 * use of the channel after a scan (where we see traffic). 2351 */ 2352 if (status == IWN_TX_FAIL_TX_LOCKED && 2353 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 2354 ieee80211_process_callback(ni, m, 0); 2355 else 2356 ieee80211_process_callback(ni, m, 2357 (status & IWN_TX_FAIL) != 0); 2358 } 2359 2360 /* 2361 * Update rate control statistics for the node. 2362 */ 2363 if (status & 0x80) { 2364 ifp->if_oerrors++; 2365 ieee80211_ratectl_tx_complete(vap, ni, 2366 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2367 } else { 2368 ieee80211_ratectl_tx_complete(vap, ni, 2369 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2370 } 2371 m_freem(m); 2372 ieee80211_free_node(ni); 2373 2374 sc->sc_tx_timer = 0; 2375 if (--ring->queued < IWN_TX_RING_LOMARK) { 2376 sc->qfullmsk &= ~(1 << ring->qid); 2377 if (sc->qfullmsk == 0 && 2378 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2379 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2380 iwn_start_locked(ifp); 2381 } 2382 } 2383 } 2384 2385 /* 2386 * Process a "command done" firmware notification. This is where we wakeup 2387 * processes waiting for a synchronous command completion. 2388 */ 2389 static void 2390 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2391 { 2392 struct iwn_tx_ring *ring = &sc->txq[4]; 2393 struct iwn_tx_data *data; 2394 2395 if ((desc->qid & 0xf) != 4) 2396 return; /* Not a command ack. */ 2397 2398 data = &ring->data[desc->idx]; 2399 2400 /* If the command was mapped in an mbuf, free it. */ 2401 if (data->m != NULL) { 2402 bus_dmamap_unload(ring->data_dmat, data->map); 2403 m_freem(data->m); 2404 data->m = NULL; 2405 } 2406 wakeup(&ring->desc[desc->idx]); 2407 } 2408 2409 /* 2410 * Process an INT_FH_RX or INT_SW_RX interrupt. 2411 */ 2412 static void 2413 iwn_notif_intr(struct iwn_softc *sc) 2414 { 2415 struct ifnet *ifp = sc->sc_ifp; 2416 struct ieee80211com *ic = ifp->if_l2com; 2417 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2418 uint16_t hw; 2419 2420 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 2421 BUS_DMASYNC_POSTREAD); 2422 2423 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2424 while (sc->rxq.cur != hw) { 2425 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2426 struct iwn_rx_desc *desc; 2427 2428 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2429 BUS_DMASYNC_POSTREAD); 2430 desc = mtod(data->m, struct iwn_rx_desc *); 2431 2432 DPRINTF(sc, IWN_DEBUG_RECV, 2433 "%s: qid %x idx %d flags %x type %d(%s) len %d\n", 2434 __func__, desc->qid & 0xf, desc->idx, desc->flags, 2435 desc->type, iwn_intr_str(desc->type), 2436 le16toh(desc->len)); 2437 2438 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2439 iwn_cmd_done(sc, desc); 2440 2441 switch (desc->type) { 2442 case IWN_RX_PHY: 2443 iwn_rx_phy(sc, desc, data); 2444 break; 2445 2446 case IWN_RX_DONE: /* 4965AGN only. */ 2447 case IWN_MPDU_RX_DONE: 2448 /* An 802.11 frame has been received. */ 2449 iwn_rx_done(sc, desc, data); 2450 break; 2451 2452 #if 0 /* HT */ 2453 case IWN_RX_COMPRESSED_BA: 2454 /* A Compressed BlockAck has been received. */ 2455 iwn_rx_compressed_ba(sc, desc, data); 2456 break; 2457 #endif 2458 2459 case IWN_TX_DONE: 2460 /* An 802.11 frame has been transmitted. 
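 * The chip-specific handler (iwn4965_tx_done or iwn5000_tx_done)
 * decodes the TX status and hands it to iwn_tx_done().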
*/ 2461 sc->sc_hal->tx_done(sc, desc, data); 2462 break; 2463 2464 case IWN_RX_STATISTICS: 2465 case IWN_BEACON_STATISTICS: 2466 iwn_rx_statistics(sc, desc, data); 2467 break; 2468 2469 case IWN_BEACON_MISSED: 2470 { 2471 struct iwn_beacon_missed *miss = 2472 (struct iwn_beacon_missed *)(desc + 1); 2473 int misses; 2474 2475 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2476 BUS_DMASYNC_POSTREAD); 2477 misses = le32toh(miss->consecutive); 2478 2479 /* XXX not sure why we're notified w/ zero */ 2480 if (misses == 0) 2481 break; 2482 DPRINTF(sc, IWN_DEBUG_STATE, 2483 "%s: beacons missed %d/%d\n", __func__, 2484 misses, le32toh(miss->total)); 2485 2486 /* 2487 * If more than 5 consecutive beacons are missed, 2488 * reinitialize the sensitivity state machine. 2489 */ 2490 if (vap->iv_state == IEEE80211_S_RUN && misses > 5) 2491 (void) iwn_init_sensitivity(sc); 2492 if (misses >= vap->iv_bmissthreshold) { 2493 IWN_UNLOCK(sc); 2494 ieee80211_beacon_miss(ic); 2495 IWN_LOCK(sc); 2496 } 2497 break; 2498 } 2499 case IWN_UC_READY: 2500 { 2501 struct iwn_ucode_info *uc = 2502 (struct iwn_ucode_info *)(desc + 1); 2503 2504 /* The microcontroller is ready. */ 2505 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2506 BUS_DMASYNC_POSTREAD); 2507 DPRINTF(sc, IWN_DEBUG_RESET, 2508 "microcode alive notification version=%d.%d " 2509 "subtype=%x alive=%x\n", uc->major, uc->minor, 2510 uc->subtype, le32toh(uc->valid)); 2511 2512 if (le32toh(uc->valid) != 1) { 2513 device_printf(sc->sc_dev, 2514 "microcontroller initialization failed"); 2515 break; 2516 } 2517 if (uc->subtype == IWN_UCODE_INIT) { 2518 /* Save microcontroller report. */ 2519 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2520 } 2521 /* Save the address of the error log in SRAM. */ 2522 sc->errptr = le32toh(uc->errptr); 2523 break; 2524 } 2525 case IWN_STATE_CHANGED: 2526 { 2527 uint32_t *status = (uint32_t *)(desc + 1); 2528 2529 /* 2530 * State change allows hardware switch change to be 2531 * noted. However, we handle this in iwn_intr as we 2532 * get both the enable/disble intr. 2533 */ 2534 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2535 BUS_DMASYNC_POSTREAD); 2536 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n", 2537 le32toh(*status)); 2538 break; 2539 } 2540 case IWN_START_SCAN: 2541 { 2542 struct iwn_start_scan *scan = 2543 (struct iwn_start_scan *)(desc + 1); 2544 2545 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2546 BUS_DMASYNC_POSTREAD); 2547 DPRINTF(sc, IWN_DEBUG_ANY, 2548 "%s: scanning channel %d status %x\n", 2549 __func__, scan->chan, le32toh(scan->status)); 2550 break; 2551 } 2552 case IWN_STOP_SCAN: 2553 { 2554 struct iwn_stop_scan *scan = 2555 (struct iwn_stop_scan *)(desc + 1); 2556 2557 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2558 BUS_DMASYNC_POSTREAD); 2559 DPRINTF(sc, IWN_DEBUG_STATE, 2560 "scan finished nchan=%d status=%d chan=%d\n", 2561 scan->nchan, scan->status, scan->chan); 2562 2563 IWN_UNLOCK(sc); 2564 ieee80211_scan_next(vap); 2565 IWN_LOCK(sc); 2566 break; 2567 } 2568 case IWN5000_CALIBRATION_RESULT: 2569 iwn5000_rx_calib_result(sc, desc, data); 2570 break; 2571 2572 case IWN5000_CALIBRATION_DONE: 2573 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2574 wakeup(sc); 2575 break; 2576 } 2577 2578 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2579 } 2580 2581 /* Tell the firmware what we have processed. */ 2582 hw = (hw == 0) ? 
IWN_RX_RING_COUNT - 1 : hw - 1; 2583 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2584 } 2585 2586 /* 2587 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2588 * from power-down sleep mode. 2589 */ 2590 static void 2591 iwn_wakeup_intr(struct iwn_softc *sc) 2592 { 2593 int qid; 2594 2595 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 2596 __func__); 2597 2598 /* Wakeup RX and TX rings. */ 2599 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2600 for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) { 2601 struct iwn_tx_ring *ring = &sc->txq[qid]; 2602 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2603 } 2604 } 2605 2606 static void 2607 iwn_rftoggle_intr(struct iwn_softc *sc) 2608 { 2609 struct ifnet *ifp = sc->sc_ifp; 2610 struct ieee80211com *ic = ifp->if_l2com; 2611 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 2612 2613 IWN_LOCK_ASSERT(sc); 2614 2615 device_printf(sc->sc_dev, "RF switch: radio %s\n", 2616 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2617 if (tmp & IWN_GP_CNTRL_RFKILL) 2618 ieee80211_runtask(ic, &sc->sc_radioon_task); 2619 else 2620 ieee80211_runtask(ic, &sc->sc_radiooff_task); 2621 } 2622 2623 /* 2624 * Dump the error log of the firmware when a firmware panic occurs. Although 2625 * we can't debug the firmware because it is neither open source nor free, it 2626 * can help us to identify certain classes of problems. 2627 */ 2628 static void 2629 iwn_fatal_intr(struct iwn_softc *sc) 2630 { 2631 const struct iwn_hal *hal = sc->sc_hal; 2632 struct iwn_fw_dump dump; 2633 int i; 2634 2635 IWN_LOCK_ASSERT(sc); 2636 2637 /* Force a complete recalibration on next init. */ 2638 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2639 2640 /* Check that the error log address is valid. */ 2641 if (sc->errptr < IWN_FW_DATA_BASE || 2642 sc->errptr + sizeof (dump) > 2643 IWN_FW_DATA_BASE + hal->fw_data_maxsz) { 2644 printf("%s: bad firmware error log address 0x%08x\n", 2645 __func__, sc->errptr); 2646 return; 2647 } 2648 if (iwn_nic_lock(sc) != 0) { 2649 printf("%s: could not read firmware error log\n", 2650 __func__); 2651 return; 2652 } 2653 /* Read firmware error log from SRAM. */ 2654 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2655 sizeof (dump) / sizeof (uint32_t)); 2656 iwn_nic_unlock(sc); 2657 2658 if (dump.valid == 0) { 2659 printf("%s: firmware error log is empty\n", 2660 __func__); 2661 return; 2662 } 2663 printf("firmware error log:\n"); 2664 printf(" error type = \"%s\" (0x%08X)\n", 2665 (dump.id < nitems(iwn_fw_errmsg)) ? 2666 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2667 dump.id); 2668 printf(" program counter = 0x%08X\n", dump.pc); 2669 printf(" source line = 0x%08X\n", dump.src_line); 2670 printf(" error data = 0x%08X%08X\n", 2671 dump.error_data[0], dump.error_data[1]); 2672 printf(" branch link = 0x%08X%08X\n", 2673 dump.branch_link[0], dump.branch_link[1]); 2674 printf(" interrupt link = 0x%08X%08X\n", 2675 dump.interrupt_link[0], dump.interrupt_link[1]); 2676 printf(" time = %u\n", dump.time[0]); 2677 2678 /* Dump driver status (TX and RX rings) while we're here. 
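 * (Current and queued indexes for every TX ring, plus the RX ring
 * read pointer.)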
*/ 2679 printf("driver status:\n"); 2680 for (i = 0; i < hal->ntxqs; i++) { 2681 struct iwn_tx_ring *ring = &sc->txq[i]; 2682 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2683 i, ring->qid, ring->cur, ring->queued); 2684 } 2685 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2686 } 2687 2688 static void 2689 iwn_intr(void *arg) 2690 { 2691 struct iwn_softc *sc = arg; 2692 struct ifnet *ifp = sc->sc_ifp; 2693 uint32_t r1, r2, tmp; 2694 2695 IWN_LOCK(sc); 2696 2697 /* Disable interrupts. */ 2698 IWN_WRITE(sc, IWN_INT_MASK, 0); 2699 2700 /* Read interrupts from ICT (fast) or from registers (slow). */ 2701 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2702 tmp = 0; 2703 while (sc->ict[sc->ict_cur] != 0) { 2704 tmp |= sc->ict[sc->ict_cur]; 2705 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2706 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2707 } 2708 tmp = le32toh(tmp); 2709 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2710 tmp = 0; 2711 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2712 tmp |= 0x8000; 2713 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2714 r2 = 0; /* Unused. */ 2715 } else { 2716 r1 = IWN_READ(sc, IWN_INT); 2717 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2718 return; /* Hardware gone! */ 2719 r2 = IWN_READ(sc, IWN_FH_INT); 2720 } 2721 2722 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2); 2723 2724 if (r1 == 0 && r2 == 0) 2725 goto done; /* Interrupt not for us. */ 2726 2727 /* Acknowledge interrupts. */ 2728 IWN_WRITE(sc, IWN_INT, r1); 2729 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2730 IWN_WRITE(sc, IWN_FH_INT, r2); 2731 2732 if (r1 & IWN_INT_RF_TOGGLED) { 2733 iwn_rftoggle_intr(sc); 2734 goto done; 2735 } 2736 if (r1 & IWN_INT_CT_REACHED) { 2737 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 2738 __func__); 2739 } 2740 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2741 iwn_fatal_intr(sc); 2742 ifp->if_flags &= ~IFF_UP; 2743 iwn_stop_locked(sc); 2744 goto done; 2745 } 2746 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2747 (r2 & IWN_FH_INT_RX)) { 2748 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2749 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2750 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2751 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2752 IWN_INT_PERIODIC_DIS); 2753 iwn_notif_intr(sc); 2754 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2755 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2756 IWN_INT_PERIODIC_ENA); 2757 } 2758 } else 2759 iwn_notif_intr(sc); 2760 } 2761 2762 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2763 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2764 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2765 wakeup(sc); /* FH DMA transfer completed. */ 2766 } 2767 2768 if (r1 & IWN_INT_ALIVE) 2769 wakeup(sc); /* Firmware is alive. */ 2770 2771 if (r1 & IWN_INT_WAKEUP) 2772 iwn_wakeup_intr(sc); 2773 2774 done: 2775 /* Re-enable interrupts. */ 2776 if (ifp->if_flags & IFF_UP) 2777 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2778 2779 IWN_UNLOCK(sc); 2780 } 2781 2782 /* 2783 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2784 * 5000 adapters use a slightly different format.) 
2785 */ 2786 static void 2787 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2788 uint16_t len) 2789 { 2790 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2791 2792 *w = htole16(len + 8); 2793 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2794 BUS_DMASYNC_PREWRITE); 2795 if (idx < IWN_SCHED_WINSZ) { 2796 *(w + IWN_TX_RING_COUNT) = *w; 2797 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2798 BUS_DMASYNC_PREWRITE); 2799 } 2800 } 2801 2802 static void 2803 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2804 uint16_t len) 2805 { 2806 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2807 2808 *w = htole16(id << 12 | (len + 8)); 2809 2810 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2811 BUS_DMASYNC_PREWRITE); 2812 if (idx < IWN_SCHED_WINSZ) { 2813 *(w + IWN_TX_RING_COUNT) = *w; 2814 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2815 BUS_DMASYNC_PREWRITE); 2816 } 2817 } 2818 2819 #ifdef notyet 2820 static void 2821 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2822 { 2823 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2824 2825 *w = (*w & htole16(0xf000)) | htole16(1); 2826 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2827 BUS_DMASYNC_PREWRITE); 2828 if (idx < IWN_SCHED_WINSZ) { 2829 *(w + IWN_TX_RING_COUNT) = *w; 2830 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2831 BUS_DMASYNC_PREWRITE); 2832 } 2833 } 2834 #endif 2835 2836 static uint8_t 2837 iwn_plcp_signal(int rate) { 2838 int i; 2839 2840 for (i = 0; i < IWN_RIDX_MAX + 1; i++) { 2841 if (rate == iwn_rates[i].rate) 2842 return i; 2843 } 2844 2845 return 0; 2846 } 2847 2848 static int 2849 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 2850 struct iwn_tx_ring *ring) 2851 { 2852 const struct iwn_hal *hal = sc->sc_hal; 2853 const struct ieee80211_txparam *tp; 2854 const struct iwn_rate *rinfo; 2855 struct ieee80211vap *vap = ni->ni_vap; 2856 struct ieee80211com *ic = ni->ni_ic; 2857 struct iwn_node *wn = (void *)ni; 2858 struct iwn_tx_desc *desc; 2859 struct iwn_tx_data *data; 2860 struct iwn_tx_cmd *cmd; 2861 struct iwn_cmd_data *tx; 2862 struct ieee80211_frame *wh; 2863 struct ieee80211_key *k = NULL; 2864 struct mbuf *mnew; 2865 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 2866 uint32_t flags; 2867 u_int hdrlen; 2868 int totlen, error, pad, nsegs = 0, i, rate; 2869 uint8_t ridx, type, txant; 2870 2871 IWN_LOCK_ASSERT(sc); 2872 2873 wh = mtod(m, struct ieee80211_frame *); 2874 hdrlen = ieee80211_anyhdrsize(wh); 2875 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2876 2877 desc = &ring->desc[ring->cur]; 2878 data = &ring->data[ring->cur]; 2879 2880 /* Choose a TX rate index. */ 2881 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 2882 if (type == IEEE80211_FC0_TYPE_MGT) 2883 rate = tp->mgmtrate; 2884 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 2885 rate = tp->mcastrate; 2886 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2887 rate = tp->ucastrate; 2888 else { 2889 /* XXX pass pktlen */ 2890 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2891 rate = ni->ni_txrate; 2892 } 2893 ridx = iwn_plcp_signal(rate); 2894 rinfo = &iwn_rates[ridx]; 2895 2896 /* Encrypt the frame if need be. */ 2897 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2898 k = ieee80211_crypto_encap(ni, m); 2899 if (k == NULL) { 2900 m_freem(m); 2901 return ENOBUFS; 2902 } 2903 /* Packet header may have moved, reset our local pointer. 
*/ 2904 wh = mtod(m, struct ieee80211_frame *); 2905 } 2906 totlen = m->m_pkthdr.len; 2907 2908 if (ieee80211_radiotap_active_vap(vap)) { 2909 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2910 2911 tap->wt_flags = 0; 2912 tap->wt_rate = rinfo->rate; 2913 if (k != NULL) 2914 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2915 2916 ieee80211_radiotap_tx(vap, m); 2917 } 2918 2919 /* Prepare TX firmware command. */ 2920 cmd = &ring->cmd[ring->cur]; 2921 cmd->code = IWN_CMD_TX_DATA; 2922 cmd->flags = 0; 2923 cmd->qid = ring->qid; 2924 cmd->idx = ring->cur; 2925 2926 tx = (struct iwn_cmd_data *)cmd->data; 2927 /* NB: No need to clear tx, all fields are reinitialized here. */ 2928 tx->scratch = 0; /* clear "scratch" area */ 2929 2930 flags = 0; 2931 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) 2932 flags |= IWN_TX_NEED_ACK; 2933 if ((wh->i_fc[0] & 2934 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2935 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2936 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 2937 2938 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2939 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 2940 2941 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2942 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2943 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2944 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2945 flags |= IWN_TX_NEED_RTS; 2946 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2947 ridx >= IWN_RIDX_OFDM6) { 2948 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2949 flags |= IWN_TX_NEED_CTS; 2950 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2951 flags |= IWN_TX_NEED_RTS; 2952 } 2953 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 2954 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2955 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 2956 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 2957 flags |= IWN_TX_NEED_PROTECTION; 2958 } else 2959 flags |= IWN_TX_FULL_TXOP; 2960 } 2961 } 2962 2963 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2964 type != IEEE80211_FC0_TYPE_DATA) 2965 tx->id = hal->broadcast_id; 2966 else 2967 tx->id = wn->id; 2968 2969 if (type == IEEE80211_FC0_TYPE_MGT) { 2970 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2971 2972 /* Tell HW to set timestamp in probe responses. */ 2973 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2974 flags |= IWN_TX_INSERT_TSTAMP; 2975 2976 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2977 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2978 tx->timeout = htole16(3); 2979 else 2980 tx->timeout = htole16(2); 2981 } else 2982 tx->timeout = htole16(0); 2983 2984 if (hdrlen & 3) { 2985 /* First segment length must be a multiple of 4. */ 2986 flags |= IWN_TX_NEED_PADDING; 2987 pad = 4 - (hdrlen & 3); 2988 } else 2989 pad = 0; 2990 2991 tx->len = htole16(totlen); 2992 tx->tid = 0; 2993 tx->rts_ntries = 60; 2994 tx->data_ntries = 15; 2995 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 2996 tx->plcp = rinfo->plcp; 2997 tx->rflags = rinfo->flags; 2998 if (tx->id == hal->broadcast_id) { 2999 /* Group or management frame. */ 3000 tx->linkq = 0; 3001 /* XXX Alternate between antenna A and B? */ 3002 txant = IWN_LSB(sc->txchainmask); 3003 tx->rflags |= IWN_RFLAG_ANT(txant); 3004 } else { 3005 tx->linkq = IWN_RIDX_OFDM54 - ridx; 3006 flags |= IWN_TX_LINKQ; /* enable MRR */ 3007 } 3008 3009 /* Set physical address of "scratch area". 
*/ 3010 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3011 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3012 3013 /* Copy 802.11 header in TX command. */ 3014 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3015 3016 /* Trim 802.11 header. */ 3017 m_adj(m, hdrlen); 3018 tx->security = 0; 3019 tx->flags = htole32(flags); 3020 3021 if (m->m_len > 0) { 3022 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 3023 m, segs, &nsegs, BUS_DMA_NOWAIT); 3024 if (error == EFBIG) { 3025 /* too many fragments, linearize */ 3026 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3027 if (mnew == NULL) { 3028 device_printf(sc->sc_dev, 3029 "%s: could not defrag mbuf\n", __func__); 3030 m_freem(m); 3031 return ENOBUFS; 3032 } 3033 m = mnew; 3034 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, 3035 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); 3036 } 3037 if (error != 0) { 3038 device_printf(sc->sc_dev, 3039 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", 3040 __func__, error); 3041 m_freem(m); 3042 return error; 3043 } 3044 } 3045 3046 data->m = m; 3047 data->ni = ni; 3048 3049 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3050 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3051 3052 /* Fill TX descriptor. */ 3053 desc->nsegs = 1 + nsegs; 3054 /* First DMA segment is used by the TX command. */ 3055 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3056 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3057 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3058 /* Other DMA segments are for data payload. */ 3059 for (i = 1; i <= nsegs; i++) { 3060 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 3061 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 3062 segs[i - 1].ds_len << 4); 3063 } 3064 3065 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3066 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3067 BUS_DMASYNC_PREWRITE); 3068 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3069 BUS_DMASYNC_PREWRITE); 3070 3071 #ifdef notyet 3072 /* Update TX scheduler. */ 3073 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3074 #endif 3075 3076 /* Kick TX ring. */ 3077 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3078 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3079 3080 /* Mark TX ring as full if we reach a certain threshold. */ 3081 if (++ring->queued > IWN_TX_RING_HIMARK) 3082 sc->qfullmsk |= 1 << ring->qid; 3083 3084 return 0; 3085 } 3086 3087 static int 3088 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 3089 struct ieee80211_node *ni, struct iwn_tx_ring *ring, 3090 const struct ieee80211_bpf_params *params) 3091 { 3092 const struct iwn_hal *hal = sc->sc_hal; 3093 const struct iwn_rate *rinfo; 3094 struct ifnet *ifp = sc->sc_ifp; 3095 struct ieee80211vap *vap = ni->ni_vap; 3096 struct ieee80211com *ic = ifp->if_l2com; 3097 struct iwn_tx_cmd *cmd; 3098 struct iwn_cmd_data *tx; 3099 struct ieee80211_frame *wh; 3100 struct iwn_tx_desc *desc; 3101 struct iwn_tx_data *data; 3102 struct mbuf *mnew; 3103 bus_addr_t paddr; 3104 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 3105 uint32_t flags; 3106 u_int hdrlen; 3107 int totlen, error, pad, nsegs = 0, i, rate; 3108 uint8_t ridx, type, txant; 3109 3110 IWN_LOCK_ASSERT(sc); 3111 3112 wh = mtod(m, struct ieee80211_frame *); 3113 hdrlen = ieee80211_anyhdrsize(wh); 3114 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3115 3116 desc = &ring->desc[ring->cur]; 3117 data = &ring->data[ring->cur]; 3118 3119 /* Choose a TX rate index. 
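 * For raw transmissions the caller supplies the rate in ibp_rate0; it is
 * validated against the current rate table and mapped to a firmware rate
 * index with iwn_plcp_signal().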
*/ 3120 rate = params->ibp_rate0; 3121 if (!ieee80211_isratevalid(ic->ic_rt, rate)) { 3122 /* XXX fall back to mcast/mgmt rate? */ 3123 m_freem(m); 3124 return EINVAL; 3125 } 3126 ridx = iwn_plcp_signal(rate); 3127 rinfo = &iwn_rates[ridx]; 3128 3129 totlen = m->m_pkthdr.len; 3130 3131 /* Prepare TX firmware command. */ 3132 cmd = &ring->cmd[ring->cur]; 3133 cmd->code = IWN_CMD_TX_DATA; 3134 cmd->flags = 0; 3135 cmd->qid = ring->qid; 3136 cmd->idx = ring->cur; 3137 3138 tx = (struct iwn_cmd_data *)cmd->data; 3139 /* NB: No need to clear tx, all fields are reinitialized here. */ 3140 tx->scratch = 0; /* clear "scratch" area */ 3141 3142 flags = 0; 3143 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 3144 flags |= IWN_TX_NEED_ACK; 3145 if (params->ibp_flags & IEEE80211_BPF_RTS) { 3146 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3147 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3148 flags &= ~IWN_TX_NEED_RTS; 3149 flags |= IWN_TX_NEED_PROTECTION; 3150 } else 3151 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 3152 } 3153 if (params->ibp_flags & IEEE80211_BPF_CTS) { 3154 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3155 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3156 flags &= ~IWN_TX_NEED_CTS; 3157 flags |= IWN_TX_NEED_PROTECTION; 3158 } else 3159 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 3160 } 3161 if (type == IEEE80211_FC0_TYPE_MGT) { 3162 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3163 3164 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3165 flags |= IWN_TX_INSERT_TSTAMP; 3166 3167 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3168 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3169 tx->timeout = htole16(3); 3170 else 3171 tx->timeout = htole16(2); 3172 } else 3173 tx->timeout = htole16(0); 3174 3175 if (hdrlen & 3) { 3176 /* First segment length must be a multiple of 4. */ 3177 flags |= IWN_TX_NEED_PADDING; 3178 pad = 4 - (hdrlen & 3); 3179 } else 3180 pad = 0; 3181 3182 if (ieee80211_radiotap_active_vap(vap)) { 3183 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 3184 3185 tap->wt_flags = 0; 3186 tap->wt_rate = rate; 3187 3188 ieee80211_radiotap_tx(vap, m); 3189 } 3190 3191 tx->len = htole16(totlen); 3192 tx->tid = 0; 3193 tx->id = hal->broadcast_id; 3194 tx->rts_ntries = params->ibp_try1; 3195 tx->data_ntries = params->ibp_try0; 3196 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3197 tx->plcp = rinfo->plcp; 3198 tx->rflags = rinfo->flags; 3199 /* Group or management frame. */ 3200 tx->linkq = 0; 3201 txant = IWN_LSB(sc->txchainmask); 3202 tx->rflags |= IWN_RFLAG_ANT(txant); 3203 /* Set physical address of "scratch area". */ 3204 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd); 3205 tx->loaddr = htole32(IWN_LOADDR(paddr)); 3206 tx->hiaddr = IWN_HIADDR(paddr); 3207 3208 /* Copy 802.11 header in TX command. */ 3209 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3210 3211 /* Trim 802.11 header. */ 3212 m_adj(m, hdrlen); 3213 tx->security = 0; 3214 tx->flags = htole32(flags); 3215 3216 if (m->m_len > 0) { 3217 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 3218 m, segs, &nsegs, BUS_DMA_NOWAIT); 3219 if (error == EFBIG) { 3220 /* Too many fragments, linearize. 
*/ 3221 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3222 if (mnew == NULL) { 3223 device_printf(sc->sc_dev, 3224 "%s: could not defrag mbuf\n", __func__); 3225 m_freem(m); 3226 return ENOBUFS; 3227 } 3228 m = mnew; 3229 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, 3230 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); 3231 } 3232 if (error != 0) { 3233 device_printf(sc->sc_dev, 3234 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", 3235 __func__, error); 3236 m_freem(m); 3237 return error; 3238 } 3239 } 3240 3241 data->m = m; 3242 data->ni = ni; 3243 3244 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3245 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3246 3247 /* Fill TX descriptor. */ 3248 desc->nsegs = 1 + nsegs; 3249 /* First DMA segment is used by the TX command. */ 3250 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3251 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3252 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3253 /* Other DMA segments are for data payload. */ 3254 for (i = 1; i <= nsegs; i++) { 3255 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 3256 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 3257 segs[i - 1].ds_len << 4); 3258 } 3259 3260 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3261 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3262 BUS_DMASYNC_PREWRITE); 3263 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3264 BUS_DMASYNC_PREWRITE); 3265 3266 #ifdef notyet 3267 /* Update TX scheduler. */ 3268 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3269 #endif 3270 3271 /* Kick TX ring. */ 3272 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3273 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3274 3275 /* Mark TX ring as full if we reach a certain threshold. */ 3276 if (++ring->queued > IWN_TX_RING_HIMARK) 3277 sc->qfullmsk |= 1 << ring->qid; 3278 3279 return 0; 3280 } 3281 3282 static int 3283 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3284 const struct ieee80211_bpf_params *params) 3285 { 3286 struct ieee80211com *ic = ni->ni_ic; 3287 struct ifnet *ifp = ic->ic_ifp; 3288 struct iwn_softc *sc = ifp->if_softc; 3289 struct iwn_tx_ring *txq; 3290 int error = 0; 3291 3292 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3293 ieee80211_free_node(ni); 3294 m_freem(m); 3295 return ENETDOWN; 3296 } 3297 3298 IWN_LOCK(sc); 3299 if (params == NULL) 3300 txq = &sc->txq[M_WME_GETAC(m)]; 3301 else 3302 txq = &sc->txq[params->ibp_pri & 3]; 3303 3304 if (params == NULL) { 3305 /* 3306 * Legacy path; interpret frame contents to decide 3307 * precisely how to send the frame. 3308 */ 3309 error = iwn_tx_data(sc, m, ni, txq); 3310 } else { 3311 /* 3312 * Caller supplied explicit parameters to use in 3313 * sending the frame. 
3314 */ 3315 error = iwn_tx_data_raw(sc, m, ni, txq, params); 3316 } 3317 if (error != 0) { 3318 /* NB: m is reclaimed on tx failure */ 3319 ieee80211_free_node(ni); 3320 ifp->if_oerrors++; 3321 } 3322 IWN_UNLOCK(sc); 3323 return error; 3324 } 3325 3326 static void 3327 iwn_start(struct ifnet *ifp) 3328 { 3329 struct iwn_softc *sc = ifp->if_softc; 3330 3331 IWN_LOCK(sc); 3332 iwn_start_locked(ifp); 3333 IWN_UNLOCK(sc); 3334 } 3335 3336 static void 3337 iwn_start_locked(struct ifnet *ifp) 3338 { 3339 struct iwn_softc *sc = ifp->if_softc; 3340 struct ieee80211_node *ni; 3341 struct iwn_tx_ring *txq; 3342 struct mbuf *m; 3343 int pri; 3344 3345 IWN_LOCK_ASSERT(sc); 3346 3347 for (;;) { 3348 if (sc->qfullmsk != 0) { 3349 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3350 break; 3351 } 3352 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 3353 if (m == NULL) 3354 break; 3355 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3356 pri = M_WME_GETAC(m); 3357 txq = &sc->txq[pri]; 3358 if (iwn_tx_data(sc, m, ni, txq) != 0) { 3359 ifp->if_oerrors++; 3360 ieee80211_free_node(ni); 3361 break; 3362 } 3363 sc->sc_tx_timer = 5; 3364 } 3365 } 3366 3367 static void 3368 iwn_watchdog(struct iwn_softc *sc) 3369 { 3370 if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { 3371 struct ifnet *ifp = sc->sc_ifp; 3372 struct ieee80211com *ic = ifp->if_l2com; 3373 3374 if_printf(ifp, "device timeout\n"); 3375 ieee80211_runtask(ic, &sc->sc_reinit_task); 3376 } 3377 } 3378 3379 static int 3380 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3381 { 3382 struct iwn_softc *sc = ifp->if_softc; 3383 struct ieee80211com *ic = ifp->if_l2com; 3384 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3385 struct ifreq *ifr = (struct ifreq *) data; 3386 int error = 0, startall = 0, stop = 0; 3387 3388 switch (cmd) { 3389 case SIOCSIFFLAGS: 3390 IWN_LOCK(sc); 3391 if (ifp->if_flags & IFF_UP) { 3392 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3393 iwn_init_locked(sc); 3394 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 3395 startall = 1; 3396 else 3397 stop = 1; 3398 } 3399 } else { 3400 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3401 iwn_stop_locked(sc); 3402 } 3403 IWN_UNLOCK(sc); 3404 if (startall) 3405 ieee80211_start_all(ic); 3406 else if (vap != NULL && stop) 3407 ieee80211_stop(vap); 3408 break; 3409 case SIOCGIFMEDIA: 3410 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3411 break; 3412 case SIOCGIFADDR: 3413 error = ether_ioctl(ifp, cmd, data); 3414 break; 3415 default: 3416 error = EINVAL; 3417 break; 3418 } 3419 return error; 3420 } 3421 3422 /* 3423 * Send a command to the firmware. 3424 */ 3425 static int 3426 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3427 { 3428 struct iwn_tx_ring *ring = &sc->txq[4]; 3429 struct iwn_tx_desc *desc; 3430 struct iwn_tx_data *data; 3431 struct iwn_tx_cmd *cmd; 3432 struct mbuf *m; 3433 bus_addr_t paddr; 3434 int totlen, error; 3435 3436 IWN_LOCK_ASSERT(sc); 3437 3438 desc = &ring->desc[ring->cur]; 3439 data = &ring->data[ring->cur]; 3440 totlen = 4 + size; 3441 3442 if (size > sizeof cmd->data) { 3443 /* Command is too large to fit in a descriptor. 
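 * Commands larger than the preallocated per-slot buffer are copied into a
 * jumbo mbuf and DMA-mapped on the fly; anything bigger than MCLBYTES is
 * rejected outright.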
*/ 3444 if (totlen > MCLBYTES) 3445 return EINVAL; 3446 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3447 if (m == NULL) 3448 return ENOMEM; 3449 cmd = mtod(m, struct iwn_tx_cmd *); 3450 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3451 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3452 if (error != 0) { 3453 m_freem(m); 3454 return error; 3455 } 3456 data->m = m; 3457 } else { 3458 cmd = &ring->cmd[ring->cur]; 3459 paddr = data->cmd_paddr; 3460 } 3461 3462 cmd->code = code; 3463 cmd->flags = 0; 3464 cmd->qid = ring->qid; 3465 cmd->idx = ring->cur; 3466 memcpy(cmd->data, buf, size); 3467 3468 desc->nsegs = 1; 3469 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3470 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3471 3472 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 3473 __func__, iwn_intr_str(cmd->code), cmd->code, 3474 cmd->flags, cmd->qid, cmd->idx); 3475 3476 if (size > sizeof cmd->data) { 3477 bus_dmamap_sync(ring->data_dmat, data->map, 3478 BUS_DMASYNC_PREWRITE); 3479 } else { 3480 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3481 BUS_DMASYNC_PREWRITE); 3482 } 3483 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3484 BUS_DMASYNC_PREWRITE); 3485 3486 #ifdef notyet 3487 /* Update TX scheduler. */ 3488 sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0); 3489 #endif 3490 3491 /* Kick command ring. */ 3492 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3493 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3494 3495 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 3496 } 3497 3498 static int 3499 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3500 { 3501 struct iwn4965_node_info hnode; 3502 caddr_t src, dst; 3503 3504 /* 3505 * We use the node structure for 5000 Series internally (it is 3506 * a superset of the one for 4965AGN). We thus copy the common 3507 * fields before sending the command. 3508 */ 3509 src = (caddr_t)node; 3510 dst = (caddr_t)&hnode; 3511 memcpy(dst, src, 48); 3512 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3513 memcpy(dst + 48, src + 72, 20); 3514 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3515 } 3516 3517 static int 3518 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3519 { 3520 /* Direct mapping. */ 3521 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3522 } 3523 3524 #if 0 /* HT */ 3525 static const uint8_t iwn_ridx_to_plcp[] = { 3526 10, 20, 55, 110, /* CCK */ 3527 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */ 3528 }; 3529 static const uint8_t iwn_siso_mcs_to_plcp[] = { 3530 0, 0, 0, 0, /* CCK */ 3531 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */ 3532 }; 3533 static const uint8_t iwn_mimo_mcs_to_plcp[] = { 3534 0, 0, 0, 0, /* CCK */ 3535 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */ 3536 }; 3537 #endif 3538 static const uint8_t iwn_prev_ridx[] = { 3539 /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */ 3540 0, 0, 1, 5, /* CCK */ 3541 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */ 3542 }; 3543 3544 /* 3545 * Configure hardware link parameters for the specified 3546 * node operating on the specified channel. 
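 * The LINK_QUALITY command carries a table of IWN_MAX_TX_RETRIES rate/antenna
 * entries; the firmware presumably walks it on retransmission, falling back
 * along iwn_prev_ridx[] (e.g. from OFDM6 down to CCK5).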
3547 */ 3548 static int 3549 iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async) 3550 { 3551 struct ifnet *ifp = sc->sc_ifp; 3552 struct ieee80211com *ic = ifp->if_l2com; 3553 struct iwn_cmd_link_quality linkq; 3554 const struct iwn_rate *rinfo; 3555 int i; 3556 uint8_t txant, ridx; 3557 3558 /* Use the first valid TX antenna. */ 3559 txant = IWN_LSB(sc->txchainmask); 3560 3561 memset(&linkq, 0, sizeof linkq); 3562 linkq.id = id; 3563 linkq.antmsk_1stream = txant; 3564 linkq.antmsk_2stream = IWN_ANT_AB; 3565 linkq.ampdu_max = 31; 3566 linkq.ampdu_threshold = 3; 3567 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3568 3569 #if 0 /* HT */ 3570 if (IEEE80211_IS_CHAN_HT(c)) 3571 linkq.mimo = 1; 3572 #endif 3573 3574 if (id == IWN_ID_BSS) 3575 ridx = IWN_RIDX_OFDM54; 3576 else if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) 3577 ridx = IWN_RIDX_OFDM6; 3578 else 3579 ridx = IWN_RIDX_CCK1; 3580 3581 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3582 rinfo = &iwn_rates[ridx]; 3583 #if 0 /* HT */ 3584 if (IEEE80211_IS_CHAN_HT40(c)) { 3585 linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx] 3586 | IWN_RIDX_MCS; 3587 linkq.retry[i].rflags = IWN_RFLAG_HT 3588 | IWN_RFLAG_HT40; 3589 /* XXX shortGI */ 3590 } else if (IEEE80211_IS_CHAN_HT(c)) { 3591 linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx] 3592 | IWN_RIDX_MCS; 3593 linkq.retry[i].rflags = IWN_RFLAG_HT; 3594 /* XXX shortGI */ 3595 } else 3596 #endif 3597 { 3598 linkq.retry[i].plcp = rinfo->plcp; 3599 linkq.retry[i].rflags = rinfo->flags; 3600 } 3601 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3602 ridx = iwn_prev_ridx[ridx]; 3603 } 3604 #ifdef IWN_DEBUG 3605 if (sc->sc_debug & IWN_DEBUG_STATE) { 3606 printf("%s: set link quality for node %d, mimo %d ssmask %d\n", 3607 __func__, id, linkq.mimo, linkq.antmsk_1stream); 3608 printf("%s:", __func__); 3609 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) 3610 printf(" %d:%x", linkq.retry[i].plcp, 3611 linkq.retry[i].rflags); 3612 printf("\n"); 3613 } 3614 #endif 3615 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3616 } 3617 3618 /* 3619 * Broadcast node is used to send group-addressed and management frames. 
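 * It is installed under the HAL's broadcast_id and then given a default
 * link-quality (rate fallback) table via iwn_set_link_quality().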
3620 */ 3621 static int 3622 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3623 { 3624 const struct iwn_hal *hal = sc->sc_hal; 3625 struct ifnet *ifp = sc->sc_ifp; 3626 struct iwn_node_info node; 3627 int error; 3628 3629 memset(&node, 0, sizeof node); 3630 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3631 node.id = hal->broadcast_id; 3632 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); 3633 error = hal->add_node(sc, &node, async); 3634 if (error != 0) 3635 return error; 3636 3637 error = iwn_set_link_quality(sc, hal->broadcast_id, async); 3638 return error; 3639 } 3640 3641 static int 3642 iwn_wme_update(struct ieee80211com *ic) 3643 { 3644 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3645 #define IWN_TXOP_TO_US(v) (v<<5) 3646 struct iwn_softc *sc = ic->ic_ifp->if_softc; 3647 struct iwn_edca_params cmd; 3648 int i; 3649 3650 memset(&cmd, 0, sizeof cmd); 3651 cmd.flags = htole32(IWN_EDCA_UPDATE); 3652 for (i = 0; i < WME_NUM_AC; i++) { 3653 const struct wmeParams *wmep = 3654 &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; 3655 cmd.ac[i].aifsn = wmep->wmep_aifsn; 3656 cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin)); 3657 cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax)); 3658 cmd.ac[i].txoplimit = 3659 htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit)); 3660 } 3661 IEEE80211_UNLOCK(ic); 3662 IWN_LOCK(sc); 3663 (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/); 3664 IWN_UNLOCK(sc); 3665 IEEE80211_LOCK(ic); 3666 return 0; 3667 #undef IWN_TXOP_TO_US 3668 #undef IWN_EXP2 3669 } 3670 3671 static void 3672 iwn_update_mcast(struct ifnet *ifp) 3673 { 3674 /* Ignore */ 3675 } 3676 3677 static void 3678 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3679 { 3680 struct iwn_cmd_led led; 3681 3682 /* Clear microcode LED ownership. */ 3683 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3684 3685 led.which = which; 3686 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3687 led.off = off; 3688 led.on = on; 3689 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3690 } 3691 3692 /* 3693 * Set the critical temperature at which the firmware will stop the radio 3694 * and notify us. 3695 */ 3696 static int 3697 iwn_set_critical_temp(struct iwn_softc *sc) 3698 { 3699 struct iwn_critical_temp crit; 3700 int32_t temp; 3701 3702 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3703 3704 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3705 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3706 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3707 temp = IWN_CTOK(110); 3708 else 3709 temp = 110; 3710 memset(&crit, 0, sizeof crit); 3711 crit.tempR = htole32(temp); 3712 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", 3713 temp); 3714 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3715 } 3716 3717 static int 3718 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3719 { 3720 struct iwn_cmd_timing cmd; 3721 uint64_t val, mod; 3722 3723 memset(&cmd, 0, sizeof cmd); 3724 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3725 cmd.bintval = htole16(ni->ni_intval); 3726 cmd.lintval = htole16(10); 3727 3728 /* Compute remaining time until next beacon. 
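 * ni_intval is in TU (1 TU = 1024 usec); the AP timestamp modulo the beacon
 * interval gives the time elapsed since the last beacon, and the difference
 * from a full interval (val - mod) is what is programmed as binitval.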
*/ 3729 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3730 mod = le64toh(cmd.tstamp) % val; 3731 cmd.binitval = htole32((uint32_t)(val - mod)); 3732 3733 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3734 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3735 3736 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3737 } 3738 3739 static void 3740 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3741 { 3742 struct ifnet *ifp = sc->sc_ifp; 3743 struct ieee80211com *ic = ifp->if_l2com; 3744 3745 /* Adjust TX power if need be (delta >= 3 degC.) */ 3746 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 3747 __func__, sc->temp, temp); 3748 if (abs(temp - sc->temp) >= 3) { 3749 /* Record temperature of last calibration. */ 3750 sc->temp = temp; 3751 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 3752 } 3753 } 3754 3755 /* 3756 * Set TX power for current channel (each rate has its own power settings). 3757 * This function takes into account the regulatory information from EEPROM, 3758 * the current temperature and the current voltage. 3759 */ 3760 static int 3761 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3762 int async) 3763 { 3764 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3765 #define fdivround(a, b, n) \ 3766 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3767 /* Linear interpolation. */ 3768 #define interpolate(x, x1, y1, x2, y2, n) \ 3769 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3770 3771 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3772 struct ifnet *ifp = sc->sc_ifp; 3773 struct ieee80211com *ic = ifp->if_l2com; 3774 struct iwn_ucode_info *uc = &sc->ucode_info; 3775 struct iwn4965_cmd_txpower cmd; 3776 struct iwn4965_eeprom_chan_samples *chans; 3777 int32_t vdiff, tdiff; 3778 int i, c, grp, maxpwr; 3779 const uint8_t *rf_gain, *dsp_gain; 3780 uint8_t chan; 3781 3782 /* Retrieve channel number. */ 3783 chan = ieee80211_chan2ieee(ic, ch); 3784 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", 3785 chan); 3786 3787 memset(&cmd, 0, sizeof cmd); 3788 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3789 cmd.chan = chan; 3790 3791 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3792 maxpwr = sc->maxpwr5GHz; 3793 rf_gain = iwn4965_rf_gain_5ghz; 3794 dsp_gain = iwn4965_dsp_gain_5ghz; 3795 } else { 3796 maxpwr = sc->maxpwr2GHz; 3797 rf_gain = iwn4965_rf_gain_2ghz; 3798 dsp_gain = iwn4965_dsp_gain_2ghz; 3799 } 3800 3801 /* Compute voltage compensation. */ 3802 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3803 if (vdiff > 0) 3804 vdiff *= 2; 3805 if (abs(vdiff) > 2) 3806 vdiff = 0; 3807 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3808 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3809 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 3810 3811 /* Get channel attenuation group. */ 3812 if (chan <= 20) /* 1-20 */ 3813 grp = 4; 3814 else if (chan <= 43) /* 34-43 */ 3815 grp = 0; 3816 else if (chan <= 70) /* 44-70 */ 3817 grp = 1; 3818 else if (chan <= 124) /* 71-124 */ 3819 grp = 2; 3820 else /* 125-200 */ 3821 grp = 3; 3822 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3823 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 3824 3825 /* Get channel sub-band. */ 3826 for (i = 0; i < IWN_NBANDS; i++) 3827 if (sc->bands[i].lo != 0 && 3828 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3829 break; 3830 if (i == IWN_NBANDS) /* Can't happen in real-life. 
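Every EEPROM channel falls inside one of the sub-bands, so the EINVAL below is purely defensive. The interpolate() macro used next derives per-chain power/gain/temp for this channel from the two sub-band reference channels; e.g. (illustrative numbers only) interpolate(40, 34, 100, 43, 120, 1) yields 100 + round(6 * 20 / 9) = 113.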
*/ 3831 return EINVAL; 3832 chans = sc->bands[i].chans; 3833 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3834 "%s: chan %d sub-band=%d\n", __func__, chan, i); 3835 3836 for (c = 0; c < 2; c++) { 3837 uint8_t power, gain, temp; 3838 int maxchpwr, pwr, ridx, idx; 3839 3840 power = interpolate(chan, 3841 chans[0].num, chans[0].samples[c][1].power, 3842 chans[1].num, chans[1].samples[c][1].power, 1); 3843 gain = interpolate(chan, 3844 chans[0].num, chans[0].samples[c][1].gain, 3845 chans[1].num, chans[1].samples[c][1].gain, 1); 3846 temp = interpolate(chan, 3847 chans[0].num, chans[0].samples[c][1].temp, 3848 chans[1].num, chans[1].samples[c][1].temp, 1); 3849 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3850 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 3851 __func__, c, power, gain, temp); 3852 3853 /* Compute temperature compensation. */ 3854 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3855 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3856 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 3857 __func__, tdiff, sc->temp, temp); 3858 3859 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3860 /* Convert dBm to half-dBm. */ 3861 maxchpwr = sc->maxpwr[chan] * 2; 3862 if ((ridx / 8) & 1) 3863 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3864 3865 pwr = maxpwr; 3866 3867 /* Adjust TX power based on rate. */ 3868 if ((ridx % 8) == 5) 3869 pwr -= 15; /* OFDM48: -7.5dB */ 3870 else if ((ridx % 8) == 6) 3871 pwr -= 17; /* OFDM54: -8.5dB */ 3872 else if ((ridx % 8) == 7) 3873 pwr -= 20; /* OFDM60: -10dB */ 3874 else 3875 pwr -= 10; /* Others: -5dB */ 3876 3877 /* Do not exceed channel max TX power. */ 3878 if (pwr > maxchpwr) 3879 pwr = maxchpwr; 3880 3881 idx = gain - (pwr - power) - tdiff - vdiff; 3882 if ((ridx / 8) & 1) /* MIMO */ 3883 idx += (int32_t)le32toh(uc->atten[grp][c]); 3884 3885 if (cmd.band == 0) 3886 idx += 9; /* 5GHz */ 3887 if (ridx == IWN_RIDX_MAX) 3888 idx += 5; /* CCK */ 3889 3890 /* Make sure idx stays in a valid range. */ 3891 if (idx < 0) 3892 idx = 0; 3893 else if (idx > IWN4965_MAX_PWR_INDEX) 3894 idx = IWN4965_MAX_PWR_INDEX; 3895 3896 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3897 "%s: Tx chain %d, rate idx %d: power=%d\n", 3898 __func__, c, ridx, idx); 3899 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3900 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3901 } 3902 } 3903 3904 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3905 "%s: set tx power for chan %d\n", __func__, chan); 3906 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3907 3908 #undef interpolate 3909 #undef fdivround 3910 } 3911 3912 static int 3913 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3914 int async) 3915 { 3916 struct iwn5000_cmd_txpower cmd; 3917 3918 /* 3919 * TX power calibration is handled automatically by the firmware 3920 * for 5000 Series. 3921 */ 3922 memset(&cmd, 0, sizeof cmd); 3923 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3924 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3925 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3926 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); 3927 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 3928 } 3929 3930 /* 3931 * Retrieve the maximum RSSI (in dBm) among receivers. 
3932 */ 3933 static int 3934 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 3935 { 3936 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 3937 uint8_t mask, agc; 3938 int rssi; 3939 3940 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 3941 agc = (le16toh(phy->agc) >> 7) & 0x7f; 3942 3943 rssi = 0; 3944 #if 0 3945 if (mask & IWN_ANT_A) /* Ant A */ 3946 rssi = max(rssi, phy->rssi[0]); 3947 if (mask & IWN_ANT_B) /* Ant B */ 3948 rssi = max(rssi, phy->rssi[2]); 3949 if (mask & IWN_ANT_C) /* Ant C */ 3950 rssi = max(rssi, phy->rssi[4]); 3951 #else 3952 rssi = max(rssi, phy->rssi[0]); 3953 rssi = max(rssi, phy->rssi[2]); 3954 rssi = max(rssi, phy->rssi[4]); 3955 #endif 3956 3957 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d " 3958 "result %d\n", __func__, agc, mask, 3959 phy->rssi[0], phy->rssi[2], phy->rssi[4], 3960 rssi - agc - IWN_RSSI_TO_DBM); 3961 return rssi - agc - IWN_RSSI_TO_DBM; 3962 } 3963 3964 static int 3965 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 3966 { 3967 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 3968 int rssi; 3969 uint8_t agc; 3970 3971 agc = (le32toh(phy->agc) >> 9) & 0x7f; 3972 3973 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 3974 le16toh(phy->rssi[1]) & 0xff); 3975 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 3976 3977 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d " 3978 "result %d\n", __func__, agc, 3979 phy->rssi[0], phy->rssi[1], phy->rssi[2], 3980 rssi - agc - IWN_RSSI_TO_DBM); 3981 return rssi - agc - IWN_RSSI_TO_DBM; 3982 } 3983 3984 /* 3985 * Retrieve the average noise (in dBm) among receivers. 3986 */ 3987 static int 3988 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3989 { 3990 int i, total, nbant, noise; 3991 3992 total = nbant = 0; 3993 for (i = 0; i < 3; i++) { 3994 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 3995 continue; 3996 total += noise; 3997 nbant++; 3998 } 3999 /* There should be at least one antenna but check anyway. */ 4000 return (nbant == 0) ? -127 : (total / nbant) - 107; 4001 } 4002 4003 /* 4004 * Compute temperature (in degC) from last received statistics. 4005 */ 4006 static int 4007 iwn4965_get_temperature(struct iwn_softc *sc) 4008 { 4009 struct iwn_ucode_info *uc = &sc->ucode_info; 4010 int32_t r1, r2, r3, r4, temp; 4011 4012 r1 = le32toh(uc->temp[0].chan20MHz); 4013 r2 = le32toh(uc->temp[1].chan20MHz); 4014 r3 = le32toh(uc->temp[2].chan20MHz); 4015 r4 = le32toh(sc->rawtemp); 4016 4017 if (r1 == r3) /* Prevents division by 0 (should not happen.) */ 4018 return 0; 4019 4020 /* Sign-extend 23-bit R4 value to 32-bit. */ 4021 r4 = (r4 << 8) >> 8; 4022 /* Compute temperature in Kelvin. */ 4023 temp = (259 * (r4 - r2)) / (r3 - r1); 4024 temp = (temp * 97) / 100 + 8; 4025 4026 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, 4027 IWN_KTOC(temp)); 4028 return IWN_KTOC(temp); 4029 } 4030 4031 static int 4032 iwn5000_get_temperature(struct iwn_softc *sc) 4033 { 4034 int32_t temp; 4035 4036 /* 4037 * Temperature is not used by the driver for 5000 Series because 4038 * TX power calibration is handled by firmware. We export it to 4039 * users through the sensor framework though. 4040 */ 4041 temp = le32toh(sc->rawtemp); 4042 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 4043 temp = (temp / -5) + sc->temp_off; 4044 temp = IWN_KTOC(temp); 4045 } 4046 return temp; 4047 } 4048 4049 /* 4050 * Initialize sensitivity calibration state machine.
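 * Initial correlator thresholds come from the per-chipset limits table (sc->limits); they are later adjusted once per beacon interval by iwn_tune_sensitivity() from the false-alarm statistics and pushed to the firmware with iwn_send_sensitivity().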
4051 */ 4052 static int 4053 iwn_init_sensitivity(struct iwn_softc *sc) 4054 { 4055 const struct iwn_hal *hal = sc->sc_hal; 4056 struct iwn_calib_state *calib = &sc->calib; 4057 uint32_t flags; 4058 int error; 4059 4060 /* Reset calibration state machine. */ 4061 memset(calib, 0, sizeof (*calib)); 4062 calib->state = IWN_CALIB_STATE_INIT; 4063 calib->cck_state = IWN_CCK_STATE_HIFA; 4064 /* Set initial correlation values. */ 4065 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 4066 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 4067 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 4068 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 4069 calib->cck_x4 = 125; 4070 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 4071 calib->energy_cck = sc->limits->energy_cck; 4072 4073 /* Write initial sensitivity. */ 4074 error = iwn_send_sensitivity(sc); 4075 if (error != 0) 4076 return error; 4077 4078 /* Write initial gains. */ 4079 error = hal->init_gains(sc); 4080 if (error != 0) 4081 return error; 4082 4083 /* Request statistics at each beacon interval. */ 4084 flags = 0; 4085 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__); 4086 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 4087 } 4088 4089 /* 4090 * Collect noise and RSSI statistics for the first 20 beacons received 4091 * after association and use them to determine connected antennas and 4092 * to set differential gains. 4093 */ 4094 static void 4095 iwn_collect_noise(struct iwn_softc *sc, 4096 const struct iwn_rx_general_stats *stats) 4097 { 4098 const struct iwn_hal *hal = sc->sc_hal; 4099 struct iwn_calib_state *calib = &sc->calib; 4100 uint32_t val; 4101 int i; 4102 4103 /* Accumulate RSSI and noise for all 3 antennas. */ 4104 for (i = 0; i < 3; i++) { 4105 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 4106 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 4107 } 4108 /* NB: We update differential gains only once after 20 beacons. */ 4109 if (++calib->nbeacons < 20) 4110 return; 4111 4112 /* Determine highest average RSSI. */ 4113 val = MAX(calib->rssi[0], calib->rssi[1]); 4114 val = MAX(calib->rssi[2], val); 4115 4116 /* Determine which antennas are connected. */ 4117 sc->chainmask = sc->rxchainmask; 4118 for (i = 0; i < 3; i++) 4119 if (val - calib->rssi[i] > 15 * 20) 4120 sc->chainmask &= ~(1 << i); 4121 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4122 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", 4123 __func__, sc->rxchainmask, sc->chainmask); 4124 4125 /* If none of the TX antennas are connected, keep at least one. */ 4126 if ((sc->chainmask & sc->txchainmask) == 0) 4127 sc->chainmask |= IWN_LSB(sc->txchainmask); 4128 4129 (void)hal->set_gains(sc); 4130 calib->state = IWN_CALIB_STATE_RUN; 4131 4132 #ifdef notyet 4133 /* XXX Disable RX chains with no antennas connected. */ 4134 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 4135 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4136 #endif 4137 4138 #if 0 4139 /* XXX: not yet */ 4140 /* Enable power-saving mode if requested by user. */ 4141 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 4142 (void)iwn_set_pslevel(sc, 0, 3, 1); 4143 #endif 4144 } 4145 4146 static int 4147 iwn4965_init_gains(struct iwn_softc *sc) 4148 { 4149 struct iwn_phy_calib_gain cmd; 4150 4151 memset(&cmd, 0, sizeof cmd); 4152 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4153 /* Differential gains initially set to 0 for all 3 antennas. 
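The memset above leaves cmd.gain[] at zero; the real per-antenna offsets are computed later by iwn4965_set_gains() once 20 beacons worth of noise statistics have been collected.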
*/ 4154 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4155 "%s: setting initial differential gains\n", __func__); 4156 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4157 } 4158 4159 static int 4160 iwn5000_init_gains(struct iwn_softc *sc) 4161 { 4162 struct iwn_phy_calib cmd; 4163 4164 memset(&cmd, 0, sizeof cmd); 4165 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 4166 cmd.ngroups = 1; 4167 cmd.isvalid = 1; 4168 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4169 "%s: setting initial differential gains\n", __func__); 4170 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4171 } 4172 4173 static int 4174 iwn4965_set_gains(struct iwn_softc *sc) 4175 { 4176 struct iwn_calib_state *calib = &sc->calib; 4177 struct iwn_phy_calib_gain cmd; 4178 int i, delta, noise; 4179 4180 /* Get minimal noise among connected antennas. */ 4181 noise = INT_MAX; /* NB: There's at least one antenna. */ 4182 for (i = 0; i < 3; i++) 4183 if (sc->chainmask & (1 << i)) 4184 noise = MIN(calib->noise[i], noise); 4185 4186 memset(&cmd, 0, sizeof cmd); 4187 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4188 /* Set differential gains for connected antennas. */ 4189 for (i = 0; i < 3; i++) { 4190 if (sc->chainmask & (1 << i)) { 4191 /* Compute attenuation (in unit of 1.5dB). */ 4192 delta = (noise - (int32_t)calib->noise[i]) / 30; 4193 /* NB: delta <= 0 */ 4194 /* Limit to [-4.5dB,0]. */ 4195 cmd.gain[i] = MIN(abs(delta), 3); 4196 if (delta < 0) 4197 cmd.gain[i] |= 1 << 2; /* sign bit */ 4198 } 4199 } 4200 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4201 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4202 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); 4203 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4204 } 4205 4206 static int 4207 iwn5000_set_gains(struct iwn_softc *sc) 4208 { 4209 struct iwn_calib_state *calib = &sc->calib; 4210 struct iwn_phy_calib_gain cmd; 4211 int i, ant, delta, div; 4212 4213 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4214 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4215 4216 memset(&cmd, 0, sizeof cmd); 4217 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN; 4218 cmd.ngroups = 1; 4219 cmd.isvalid = 1; 4220 /* Get first available RX antenna as referential. */ 4221 ant = IWN_LSB(sc->rxchainmask); 4222 /* Set differential gains for other antennas. */ 4223 for (i = ant + 1; i < 3; i++) { 4224 if (sc->chainmask & (1 << i)) { 4225 /* The delta is relative to antenna "ant". */ 4226 delta = ((int32_t)calib->noise[ant] - 4227 (int32_t)calib->noise[i]) / div; 4228 /* Limit to [-4.5dB,+4.5dB]. */ 4229 cmd.gain[i - 1] = MIN(abs(delta), 3); 4230 if (delta < 0) 4231 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4232 } 4233 } 4234 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4235 "setting differential gains Ant B/C: %x/%x (%x)\n", 4236 cmd.gain[0], cmd.gain[1], sc->chainmask); 4237 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4238 } 4239 4240 /* 4241 * Tune RF RX sensitivity based on the number of false alarms detected 4242 * during the last beacon period. 
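 * Roughly: correlation thresholds are raised (sensitivity reduced) when the scaled false-alarm count exceeds 50 * rxena and lowered when it drops below 5 * rxena, each step clamped to the per-chipset limits; the inc()/dec() macros set needs_update so that a single sensitivity command is sent at the end.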
4243 */ 4244 static void 4245 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4246 { 4247 #define inc(val, inc, max) \ 4248 if ((val) < (max)) { \ 4249 if ((val) < (max) - (inc)) \ 4250 (val) += (inc); \ 4251 else \ 4252 (val) = (max); \ 4253 needs_update = 1; \ 4254 } 4255 #define dec(val, dec, min) \ 4256 if ((val) > (min)) { \ 4257 if ((val) > (min) + (dec)) \ 4258 (val) -= (dec); \ 4259 else \ 4260 (val) = (min); \ 4261 needs_update = 1; \ 4262 } 4263 4264 const struct iwn_sensitivity_limits *limits = sc->limits; 4265 struct iwn_calib_state *calib = &sc->calib; 4266 uint32_t val, rxena, fa; 4267 uint32_t energy[3], energy_min; 4268 uint8_t noise[3], noise_ref; 4269 int i, needs_update = 0; 4270 4271 /* Check that we've been enabled long enough. */ 4272 rxena = le32toh(stats->general.load); 4273 if (rxena == 0) 4274 return; 4275 4276 /* Compute number of false alarms since last call for OFDM. */ 4277 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4278 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 4279 fa *= 200 * 1024; /* 200TU */ 4280 4281 /* Save counters values for next call. */ 4282 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 4283 calib->fa_ofdm = le32toh(stats->ofdm.fa); 4284 4285 if (fa > 50 * rxena) { 4286 /* High false alarm count, decrease sensitivity. */ 4287 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4288 "%s: OFDM high false alarm count: %u\n", __func__, fa); 4289 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4290 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4291 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4292 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4293 4294 } else if (fa < 5 * rxena) { 4295 /* Low false alarm count, increase sensitivity. */ 4296 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4297 "%s: OFDM low false alarm count: %u\n", __func__, fa); 4298 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4299 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4300 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4301 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4302 } 4303 4304 /* Compute maximum noise among 3 receivers. */ 4305 for (i = 0; i < 3; i++) 4306 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 4307 val = MAX(noise[0], noise[1]); 4308 val = MAX(noise[2], val); 4309 /* Insert it into our samples table. */ 4310 calib->noise_samples[calib->cur_noise_sample] = val; 4311 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4312 4313 /* Compute maximum noise among last 20 samples. */ 4314 noise_ref = calib->noise_samples[0]; 4315 for (i = 1; i < 20; i++) 4316 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4317 4318 /* Compute maximum energy among 3 receivers. */ 4319 for (i = 0; i < 3; i++) 4320 energy[i] = le32toh(stats->general.energy[i]); 4321 val = MIN(energy[0], energy[1]); 4322 val = MIN(energy[2], val); 4323 /* Insert it into our samples table. */ 4324 calib->energy_samples[calib->cur_energy_sample] = val; 4325 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4326 4327 /* Compute minimum energy among last 10 samples. */ 4328 energy_min = calib->energy_samples[0]; 4329 for (i = 1; i < 10; i++) 4330 energy_min = MAX(energy_min, calib->energy_samples[i]); 4331 energy_min += 6; 4332 4333 /* Compute number of false alarms since last call for CCK. */ 4334 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4335 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4336 fa *= 200 * 1024; /* 200TU */ 4337 4338 /* Save counters values for next call. 
*/ 4339 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4340 calib->fa_cck = le32toh(stats->cck.fa); 4341 4342 if (fa > 50 * rxena) { 4343 /* High false alarm count, decrease sensitivity. */ 4344 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4345 "%s: CCK high false alarm count: %u\n", __func__, fa); 4346 calib->cck_state = IWN_CCK_STATE_HIFA; 4347 calib->low_fa = 0; 4348 4349 if (calib->cck_x4 > 160) { 4350 calib->noise_ref = noise_ref; 4351 if (calib->energy_cck > 2) 4352 dec(calib->energy_cck, 2, energy_min); 4353 } 4354 if (calib->cck_x4 < 160) { 4355 calib->cck_x4 = 161; 4356 needs_update = 1; 4357 } else 4358 inc(calib->cck_x4, 3, limits->max_cck_x4); 4359 4360 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4361 4362 } else if (fa < 5 * rxena) { 4363 /* Low false alarm count, increase sensitivity. */ 4364 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4365 "%s: CCK low false alarm count: %u\n", __func__, fa); 4366 calib->cck_state = IWN_CCK_STATE_LOFA; 4367 calib->low_fa++; 4368 4369 if (calib->cck_state != IWN_CCK_STATE_INIT && 4370 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4371 calib->low_fa > 100)) { 4372 inc(calib->energy_cck, 2, limits->min_energy_cck); 4373 dec(calib->cck_x4, 3, limits->min_cck_x4); 4374 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4375 } 4376 } else { 4377 /* Not worth to increase or decrease sensitivity. */ 4378 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4379 "%s: CCK normal false alarm count: %u\n", __func__, fa); 4380 calib->low_fa = 0; 4381 calib->noise_ref = noise_ref; 4382 4383 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4384 /* Previous interval had many false alarms. */ 4385 dec(calib->energy_cck, 8, energy_min); 4386 } 4387 calib->cck_state = IWN_CCK_STATE_INIT; 4388 } 4389 4390 if (needs_update) 4391 (void)iwn_send_sensitivity(sc); 4392 #undef dec 4393 #undef inc 4394 } 4395 4396 static int 4397 iwn_send_sensitivity(struct iwn_softc *sc) 4398 { 4399 struct iwn_calib_state *calib = &sc->calib; 4400 struct iwn_sensitivity_cmd cmd; 4401 4402 memset(&cmd, 0, sizeof cmd); 4403 cmd.which = IWN_SENSITIVITY_WORKTBL; 4404 /* OFDM modulation. */ 4405 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4406 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4407 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4408 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4409 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4410 cmd.energy_ofdm_th = htole16(62); 4411 /* CCK modulation. */ 4412 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4413 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4414 cmd.energy_cck = htole16(calib->energy_cck); 4415 /* Barker modulation: use default values. */ 4416 cmd.corr_barker = htole16(190); 4417 cmd.corr_barker_mrc = htole16(390); 4418 4419 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4420 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 4421 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4422 calib->ofdm_mrc_x4, calib->cck_x4, 4423 calib->cck_mrc_x4, calib->energy_cck); 4424 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1); 4425 } 4426 4427 /* 4428 * Set STA mode power saving level (between 0 and 5). 4429 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4430 */ 4431 static int 4432 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4433 { 4434 const struct iwn_pmgt *pmgt; 4435 struct iwn_pmgt_cmd cmd; 4436 uint32_t max, skip_dtim; 4437 uint32_t tmp; 4438 int i; 4439 4440 /* Select which PS parameters to use. 
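The iwn_pmgt table is indexed by a DTIM-period bucket (<= 2, <= 10, > 10) and by the requested level 0-5, level 0 being CAM as described above.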
*/ 4441 if (dtim <= 2) 4442 pmgt = &iwn_pmgt[0][level]; 4443 else if (dtim <= 10) 4444 pmgt = &iwn_pmgt[1][level]; 4445 else 4446 pmgt = &iwn_pmgt[2][level]; 4447 4448 memset(&cmd, 0, sizeof cmd); 4449 if (level != 0) /* not CAM */ 4450 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4451 if (level == 5) 4452 cmd.flags |= htole16(IWN_PS_FAST_PD); 4453 /* Retrieve PCIe Active State Power Management (ASPM). */ 4454 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 4455 if (!(tmp & 0x1)) /* L0s Entry disabled. */ 4456 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4457 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4458 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4459 4460 if (dtim == 0) { 4461 dtim = 1; 4462 skip_dtim = 0; 4463 } else 4464 skip_dtim = pmgt->skip_dtim; 4465 if (skip_dtim != 0) { 4466 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4467 max = pmgt->intval[4]; 4468 if (max == (uint32_t)-1) 4469 max = dtim * (skip_dtim + 1); 4470 else if (max > dtim) 4471 max = (max / dtim) * dtim; 4472 } else 4473 max = dtim; 4474 for (i = 0; i < 5; i++) 4475 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 4476 4477 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 4478 level); 4479 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4480 } 4481 4482 static int 4483 iwn_config(struct iwn_softc *sc) 4484 { 4485 const struct iwn_hal *hal = sc->sc_hal; 4486 struct ifnet *ifp = sc->sc_ifp; 4487 struct ieee80211com *ic = ifp->if_l2com; 4488 struct iwn_bluetooth bluetooth; 4489 uint32_t txmask; 4490 int error; 4491 uint16_t rxchain; 4492 4493 /* Configure valid TX chains for 5000 Series. */ 4494 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4495 txmask = htole32(sc->txchainmask); 4496 DPRINTF(sc, IWN_DEBUG_RESET, 4497 "%s: configuring valid TX chains 0x%x\n", __func__, txmask); 4498 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4499 sizeof txmask, 0); 4500 if (error != 0) { 4501 device_printf(sc->sc_dev, 4502 "%s: could not configure valid TX chains, " 4503 "error %d\n", __func__, error); 4504 return error; 4505 } 4506 } 4507 4508 /* Configure bluetooth coexistence. */ 4509 memset(&bluetooth, 0, sizeof bluetooth); 4510 bluetooth.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 4511 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF; 4512 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF; 4513 DPRINTF(sc, IWN_DEBUG_RESET, "%s: config bluetooth coexistence\n", 4514 __func__); 4515 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4516 if (error != 0) { 4517 device_printf(sc->sc_dev, 4518 "%s: could not configure bluetooth coexistence, error %d\n", 4519 __func__, error); 4520 return error; 4521 } 4522 4523 /* Set mode, channel, RX filter and enable RX. */ 4524 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4525 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp)); 4526 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp)); 4527 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 4528 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4529 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 4530 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4531 switch (ic->ic_opmode) { 4532 case IEEE80211_M_STA: 4533 sc->rxon.mode = IWN_MODE_STA; 4534 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4535 break; 4536 case IEEE80211_M_MONITOR: 4537 sc->rxon.mode = IWN_MODE_MONITOR; 4538 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4539 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4540 break; 4541 default: 4542 /* Should not get there. 
*/ 4543 break; 4544 } 4545 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4546 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4547 sc->rxon.ht_single_mask = 0xff; 4548 sc->rxon.ht_dual_mask = 0xff; 4549 sc->rxon.ht_triple_mask = 0xff; 4550 rxchain = 4551 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4552 IWN_RXCHAIN_MIMO_COUNT(2) | 4553 IWN_RXCHAIN_IDLE_COUNT(2); 4554 sc->rxon.rxchain = htole16(rxchain); 4555 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); 4556 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 0); 4557 if (error != 0) { 4558 device_printf(sc->sc_dev, 4559 "%s: RXON command failed\n", __func__); 4560 return error; 4561 } 4562 4563 error = iwn_add_broadcast_node(sc, 0); 4564 if (error != 0) { 4565 device_printf(sc->sc_dev, 4566 "%s: could not add broadcast node\n", __func__); 4567 return error; 4568 } 4569 4570 /* Configuration has changed, set TX power accordingly. */ 4571 error = hal->set_txpower(sc, ic->ic_curchan, 0); 4572 if (error != 0) { 4573 device_printf(sc->sc_dev, 4574 "%s: could not set TX power\n", __func__); 4575 return error; 4576 } 4577 4578 error = iwn_set_critical_temp(sc); 4579 if (error != 0) { 4580 device_printf(sc->sc_dev, 4581 "%s: could not set critical temperature\n", __func__); 4582 return error; 4583 } 4584 4585 /* Set power saving level to CAM during initialization. */ 4586 error = iwn_set_pslevel(sc, 0, 0, 0); 4587 if (error != 0) { 4588 device_printf(sc->sc_dev, 4589 "%s: could not set power saving level\n", __func__); 4590 return error; 4591 } 4592 return 0; 4593 } 4594 4595 static int 4596 iwn_scan(struct iwn_softc *sc) 4597 { 4598 struct ifnet *ifp = sc->sc_ifp; 4599 struct ieee80211com *ic = ifp->if_l2com; 4600 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/ 4601 struct iwn_scan_hdr *hdr; 4602 struct iwn_cmd_data *tx; 4603 struct iwn_scan_essid *essid; 4604 struct iwn_scan_chan *chan; 4605 struct ieee80211_frame *wh; 4606 struct ieee80211_rateset *rs; 4607 struct ieee80211_channel *c; 4608 int buflen, error, nrates; 4609 uint16_t rxchain; 4610 uint8_t *buf, *frm, txant; 4611 4612 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4613 if (buf == NULL) { 4614 device_printf(sc->sc_dev, 4615 "%s: could not allocate buffer for scan command\n", 4616 __func__); 4617 return ENOMEM; 4618 } 4619 hdr = (struct iwn_scan_hdr *)buf; 4620 4621 /* 4622 * Move to the next channel if no frames are received within 10ms 4623 * after sending the probe request. 4624 */ 4625 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4626 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4627 4628 /* Select antennas for scanning. */ 4629 rxchain = 4630 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4631 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4632 IWN_RXCHAIN_DRIVER_FORCE; 4633 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) && 4634 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4635 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4636 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4637 } else /* Use all available RX antennas. */ 4638 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4639 hdr->rxchain = htole16(rxchain); 4640 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4641 4642 tx = (struct iwn_cmd_data *)(hdr + 1); 4643 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4644 tx->id = sc->sc_hal->broadcast_id; 4645 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4646 4647 if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) { 4648 /* Send probe requests at 6Mbps.
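6 Mb/s is the lowest mandatory OFDM rate on 5GHz channels; the 2GHz branch below uses 1 Mb/s CCK instead.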
*/ 4649 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4650 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4651 } else { 4652 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4653 /* Send probe requests at 1Mbps. */ 4654 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4655 tx->rflags = IWN_RFLAG_CCK; 4656 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4657 } 4658 /* Use the first valid TX antenna. */ 4659 txant = IWN_LSB(sc->txchainmask); 4660 tx->rflags |= IWN_RFLAG_ANT(txant); 4661 4662 essid = (struct iwn_scan_essid *)(tx + 1); 4663 if (ss->ss_ssid[0].len != 0) { 4664 essid[0].id = IEEE80211_ELEMID_SSID; 4665 essid[0].len = ss->ss_ssid[0].len; 4666 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 4667 } 4668 4669 /* 4670 * Build a probe request frame. Most of the following code is a 4671 * copy & paste of what is done in net80211. 4672 */ 4673 wh = (struct ieee80211_frame *)(essid + 20); 4674 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4675 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4676 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4677 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 4678 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 4679 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 4680 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4681 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4682 4683 frm = (uint8_t *)(wh + 1); 4684 4685 /* Add SSID IE. */ 4686 *frm++ = IEEE80211_ELEMID_SSID; 4687 *frm++ = ss->ss_ssid[0].len; 4688 memcpy(frm, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 4689 frm += ss->ss_ssid[0].len; 4690 4691 /* Add supported rates IE. */ 4692 *frm++ = IEEE80211_ELEMID_RATES; 4693 nrates = rs->rs_nrates; 4694 if (nrates > IEEE80211_RATE_SIZE) 4695 nrates = IEEE80211_RATE_SIZE; 4696 *frm++ = nrates; 4697 memcpy(frm, rs->rs_rates, nrates); 4698 frm += nrates; 4699 4700 /* Add supported xrates IE. */ 4701 if (rs->rs_nrates > IEEE80211_RATE_SIZE) { 4702 nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; 4703 *frm++ = IEEE80211_ELEMID_XRATES; 4704 *frm++ = (uint8_t)nrates; 4705 memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); 4706 frm += nrates; 4707 } 4708 4709 /* Set length of probe request. 
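That is, the 24-byte 802.11 header plus the SSID, supported rates and, if present, extended rates IEs appended above; frm points just past the last IE.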
*/ 4710 tx->len = htole16(frm - (uint8_t *)wh); 4711 4712 c = ic->ic_curchan; 4713 chan = (struct iwn_scan_chan *)frm; 4714 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4715 chan->flags = 0; 4716 if (ss->ss_nssid > 0) 4717 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4718 chan->dsp_gain = 0x6e; 4719 if (IEEE80211_IS_CHAN_5GHZ(c) && 4720 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 4721 chan->rf_gain = 0x3b; 4722 chan->active = htole16(24); 4723 chan->passive = htole16(110); 4724 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4725 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 4726 chan->rf_gain = 0x3b; 4727 chan->active = htole16(24); 4728 if (sc->rxon.associd) 4729 chan->passive = htole16(78); 4730 else 4731 chan->passive = htole16(110); 4732 hdr->crc_threshold = 0xffff; 4733 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 4734 chan->rf_gain = 0x28; 4735 chan->active = htole16(36); 4736 chan->passive = htole16(120); 4737 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4738 } else { 4739 chan->rf_gain = 0x28; 4740 chan->active = htole16(36); 4741 if (sc->rxon.associd) 4742 chan->passive = htole16(88); 4743 else 4744 chan->passive = htole16(120); 4745 hdr->crc_threshold = 0xffff; 4746 } 4747 4748 DPRINTF(sc, IWN_DEBUG_STATE, 4749 "%s: chan %u flags 0x%x rf_gain 0x%x " 4750 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__, 4751 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 4752 chan->active, chan->passive); 4753 4754 hdr->nchan++; 4755 chan++; 4756 buflen = (uint8_t *)chan - buf; 4757 hdr->len = htole16(buflen); 4758 4759 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 4760 hdr->nchan); 4761 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4762 free(buf, M_DEVBUF); 4763 return error; 4764 } 4765 4766 static int 4767 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 4768 { 4769 const struct iwn_hal *hal = sc->sc_hal; 4770 struct ifnet *ifp = sc->sc_ifp; 4771 struct ieee80211com *ic = ifp->if_l2com; 4772 struct ieee80211_node *ni = vap->iv_bss; 4773 int error; 4774 4775 sc->calib.state = IWN_CALIB_STATE_INIT; 4776 4777 /* Update adapter configuration. 
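RXON is re-sent here with the target BSSID and channel before authentication; as noted below, this clears the firmware node table, so the broadcast node is added back once the command completes.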
*/ 4778 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4779 sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); 4780 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4781 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4782 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4783 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4784 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4785 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4786 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4787 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4788 sc->rxon.cck_mask = 0; 4789 sc->rxon.ofdm_mask = 0x15; 4790 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4791 sc->rxon.cck_mask = 0x03; 4792 sc->rxon.ofdm_mask = 0; 4793 } else { 4794 /* XXX assume 802.11b/g */ 4795 sc->rxon.cck_mask = 0x0f; 4796 sc->rxon.ofdm_mask = 0x15; 4797 } 4798 DPRINTF(sc, IWN_DEBUG_STATE, 4799 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4800 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4801 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4802 __func__, 4803 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4804 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4805 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4806 le16toh(sc->rxon.rxchain), 4807 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4808 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4809 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4810 if (error != 0) { 4811 device_printf(sc->sc_dev, 4812 "%s: RXON command failed, error %d\n", __func__, error); 4813 return error; 4814 } 4815 4816 /* Configuration has changed, set TX power accordingly. */ 4817 error = hal->set_txpower(sc, ni->ni_chan, 1); 4818 if (error != 0) { 4819 device_printf(sc->sc_dev, 4820 "%s: could not set Tx power, error %d\n", __func__, error); 4821 return error; 4822 } 4823 /* 4824 * Reconfiguring RXON clears the firmware nodes table so we must 4825 * add the broadcast node again. 4826 */ 4827 error = iwn_add_broadcast_node(sc, 1); 4828 if (error != 0) { 4829 device_printf(sc->sc_dev, 4830 "%s: could not add broadcast node, error %d\n", 4831 __func__, error); 4832 return error; 4833 } 4834 return 0; 4835 } 4836 4837 /* 4838 * Configure the adapter for associated state. 4839 */ 4840 static int 4841 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 4842 { 4843 #define MS(v,x) (((v) & x) >> x##_S) 4844 const struct iwn_hal *hal = sc->sc_hal; 4845 struct ifnet *ifp = sc->sc_ifp; 4846 struct ieee80211com *ic = ifp->if_l2com; 4847 struct ieee80211_node *ni = vap->iv_bss; 4848 struct iwn_node_info node; 4849 int error; 4850 4851 sc->calib.state = IWN_CALIB_STATE_INIT; 4852 4853 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4854 /* Link LED blinks while monitoring. */ 4855 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4856 return 0; 4857 } 4858 error = iwn_set_timing(sc, ni); 4859 if (error != 0) { 4860 device_printf(sc->sc_dev, 4861 "%s: could not set timing, error %d\n", __func__, error); 4862 return error; 4863 } 4864 4865 /* Update adapter configuration. */ 4866 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4867 sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); 4868 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4869 /* Short preamble and slot time are negotiated when associating. 
*/ 4870 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4871 sc->rxon.flags |= htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4872 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4873 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4874 else 4875 sc->rxon.flags &= ~htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4876 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4877 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4878 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4879 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4880 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4881 sc->rxon.cck_mask = 0; 4882 sc->rxon.ofdm_mask = 0x15; 4883 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4884 sc->rxon.cck_mask = 0x03; 4885 sc->rxon.ofdm_mask = 0; 4886 } else { 4887 /* XXX assume 802.11b/g */ 4888 sc->rxon.cck_mask = 0x0f; 4889 sc->rxon.ofdm_mask = 0x15; 4890 } 4891 #if 0 /* HT */ 4892 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 4893 sc->rxon.flags &= ~htole32(IWN_RXON_HT); 4894 if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) 4895 sc->rxon.flags |= htole32(IWN_RXON_HT40U); 4896 else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 4897 sc->rxon.flags |= htole32(IWN_RXON_HT40D); 4898 else 4899 sc->rxon.flags |= htole32(IWN_RXON_HT20); 4900 sc->rxon.rxchain = htole16( 4901 IWN_RXCHAIN_VALID(3) 4902 | IWN_RXCHAIN_MIMO_COUNT(3) 4903 | IWN_RXCHAIN_IDLE_COUNT(1) 4904 | IWN_RXCHAIN_MIMO_FORCE); 4905 4906 maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); 4907 ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); 4908 } else 4909 maxrxampdu = ampdudensity = 0; 4910 #endif 4911 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4912 4913 DPRINTF(sc, IWN_DEBUG_STATE, 4914 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4915 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4916 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4917 __func__, 4918 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4919 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4920 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4921 le16toh(sc->rxon.rxchain), 4922 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4923 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4924 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4925 if (error != 0) { 4926 device_printf(sc->sc_dev, 4927 "%s: could not update configuration, error %d\n", 4928 __func__, error); 4929 return error; 4930 } 4931 4932 /* Configuration has changed, set TX power accordingly. */ 4933 error = hal->set_txpower(sc, ni->ni_chan, 1); 4934 if (error != 0) { 4935 device_printf(sc->sc_dev, 4936 "%s: could not set Tx power, error %d\n", __func__, error); 4937 return error; 4938 } 4939 4940 /* Add BSS node. 
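The peer is installed under the fixed station id IWN_ID_BSS; its MRR (link quality) table is programmed right after with iwn_set_link_quality().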
*/ 4941 memset(&node, 0, sizeof node); 4942 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4943 node.id = IWN_ID_BSS; 4944 #ifdef notyet 4945 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) | 4946 IWN_AMDPU_DENSITY(5)); /* 2us */ 4947 #endif 4948 DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n", 4949 __func__, node.id, le32toh(node.htflags)); 4950 error = hal->add_node(sc, &node, 1); 4951 if (error != 0) { 4952 device_printf(sc->sc_dev, "could not add BSS node\n"); 4953 return error; 4954 } 4955 DPRINTF(sc, IWN_DEBUG_STATE, "setting link quality for node %d\n", 4956 node.id); 4957 error = iwn_set_link_quality(sc, node.id, 1); 4958 if (error != 0) { 4959 device_printf(sc->sc_dev, 4960 "%s: could not setup MRR for node %d, error %d\n", 4961 __func__, node.id, error); 4962 return error; 4963 } 4964 4965 error = iwn_init_sensitivity(sc); 4966 if (error != 0) { 4967 device_printf(sc->sc_dev, 4968 "%s: could not set sensitivity, error %d\n", 4969 __func__, error); 4970 return error; 4971 } 4972 4973 /* Start periodic calibration timer. */ 4974 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4975 iwn_calib_reset(sc); 4976 4977 /* Link LED always on while associated. */ 4978 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4979 4980 return 0; 4981 #undef MS 4982 } 4983 4984 #if 0 /* HT */ 4985 /* 4986 * This function is called by upper layer when an ADDBA request is received 4987 * from another STA and before the ADDBA response is sent. 4988 */ 4989 static int 4990 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 4991 uint8_t tid) 4992 { 4993 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 4994 struct iwn_softc *sc = ic->ic_softc; 4995 struct iwn_node *wn = (void *)ni; 4996 struct iwn_node_info node; 4997 4998 memset(&node, 0, sizeof node); 4999 node.id = wn->id; 5000 node.control = IWN_NODE_UPDATE; 5001 node.flags = IWN_FLAG_SET_ADDBA; 5002 node.addba_tid = tid; 5003 node.addba_ssn = htole16(ba->ba_winstart); 5004 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 5005 wn->id, tid, ba->ba_winstart); 5006 return sc->sc_hal->add_node(sc, &node, 1); 5007 } 5008 5009 /* 5010 * This function is called by upper layer on teardown of an HT-immediate 5011 * Block Ack agreement (e.g. upon receipt of a DELBA frame.) 5012 */ 5013 static void 5014 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5015 uint8_t tid) 5016 { 5017 struct iwn_softc *sc = ic->ic_softc; 5018 struct iwn_node *wn = (void *)ni; 5019 struct iwn_node_info node; 5020 5021 memset(&node, 0, sizeof node); 5022 node.id = wn->id; 5023 node.control = IWN_NODE_UPDATE; 5024 node.flags = IWN_FLAG_SET_DELBA; 5025 node.delba_tid = tid; 5026 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 5027 (void)sc->sc_hal->add_node(sc, &node, 1); 5028 } 5029 5030 /* 5031 * This function is called by upper layer when an ADDBA response is received 5032 * from another STA. 5033 */ 5034 static int 5035 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5036 uint8_t tid) 5037 { 5038 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5039 struct iwn_softc *sc = ic->ic_softc; 5040 const struct iwn_hal *hal = sc->sc_hal; 5041 struct iwn_node *wn = (void *)ni; 5042 struct iwn_node_info node; 5043 int error; 5044 5045 /* Enable TX for the specified RA/TID.
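Clearing the TID bit in wn->disable_tid and pushing the bitmap with IWN_FLAG_SET_DISABLE_TID allows the firmware to aggregate on this TID; the TX scheduler queue is then set up below under the NIC lock.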
*/ 5046 wn->disable_tid &= ~(1 << tid); 5047 memset(&node, 0, sizeof node); 5048 node.id = wn->id; 5049 node.control = IWN_NODE_UPDATE; 5050 node.flags = IWN_FLAG_SET_DISABLE_TID; 5051 node.disable_tid = htole16(wn->disable_tid); 5052 error = hal->add_node(sc, &node, 1); 5053 if (error != 0) 5054 return error; 5055 5056 if ((error = iwn_nic_lock(sc)) != 0) 5057 return error; 5058 hal->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5059 iwn_nic_unlock(sc); 5060 return 0; 5061 } 5062 5063 static void 5064 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5065 uint8_t tid) 5066 { 5067 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5068 struct iwn_softc *sc = ic->ic_softc; 5069 int error; 5070 5071 error = iwn_nic_lock(sc); 5072 if (error != 0) 5073 return; 5074 sc->sc_hal->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5075 iwn_nic_unlock(sc); 5076 } 5077 5078 static void 5079 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5080 uint8_t tid, uint16_t ssn) 5081 { 5082 struct iwn_node *wn = (void *)ni; 5083 int qid = 7 + tid; 5084 5085 /* Stop TX scheduler while we're changing its configuration. */ 5086 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5087 IWN4965_TXQ_STATUS_CHGACT); 5088 5089 /* Assign RA/TID translation to the queue. */ 5090 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5091 wn->id << 4 | tid); 5092 5093 /* Enable chain-building mode for the queue. */ 5094 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5095 5096 /* Set starting sequence number from the ADDBA request. */ 5097 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5098 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5099 5100 /* Set scheduler window size. */ 5101 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5102 IWN_SCHED_WINSZ); 5103 /* Set scheduler frame limit. */ 5104 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5105 IWN_SCHED_LIMIT << 16); 5106 5107 /* Enable interrupts for the queue. */ 5108 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5109 5110 /* Mark the queue as active. */ 5111 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5112 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5113 iwn_tid2fifo[tid] << 1); 5114 } 5115 5116 static void 5117 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5118 { 5119 int qid = 7 + tid; 5120 5121 /* Stop TX scheduler while we're changing its configuration. */ 5122 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5123 IWN4965_TXQ_STATUS_CHGACT); 5124 5125 /* Set starting sequence number from the ADDBA request. */ 5126 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5127 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5128 5129 /* Disable interrupts for the queue. */ 5130 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5131 5132 /* Mark the queue as inactive. */ 5133 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5134 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5135 } 5136 5137 static void 5138 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5139 uint8_t tid, uint16_t ssn) 5140 { 5141 struct iwn_node *wn = (void *)ni; 5142 int qid = 10 + tid; 5143 5144 /* Stop TX scheduler while we're changing its configuration. */ 5145 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5146 IWN5000_TXQ_STATUS_CHGACT); 5147 5148 /* Assign RA/TID translation to the queue. 
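On the 5000 series the aggregation queue is qid = 10 + tid (7 + tid on 4965 above); the translation entry packs the station id and TID as (id << 4 | tid).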
*/ 5149 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5150 wn->id << 4 | tid); 5151 5152 /* Enable chain-building mode for the queue. */ 5153 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5154 5155 /* Enable aggregation for the queue. */ 5156 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5157 5158 /* Set starting sequence number from the ADDBA request. */ 5159 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5160 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5161 5162 /* Set scheduler window size and frame limit. */ 5163 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5164 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5165 5166 /* Enable interrupts for the queue. */ 5167 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5168 5169 /* Mark the queue as active. */ 5170 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5171 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5172 } 5173 5174 static void 5175 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5176 { 5177 int qid = 10 + tid; 5178 5179 /* Stop TX scheduler while we're changing its configuration. */ 5180 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5181 IWN5000_TXQ_STATUS_CHGACT); 5182 5183 /* Disable aggregation for the queue. */ 5184 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5185 5186 /* Set starting sequence number from the ADDBA request. */ 5187 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5188 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5189 5190 /* Disable interrupts for the queue. */ 5191 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5192 5193 /* Mark the queue as inactive. */ 5194 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5195 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5196 } 5197 #endif 5198 5199 /* 5200 * Send calibration results to the runtime firmware. These results were 5201 * obtained on first boot from the initialization firmware, or by reading 5202 * the EEPROM for crystal calibration. 5203 */ 5204 static int 5205 iwn5000_send_calib_results(struct iwn_softc *sc) 5206 { 5207 struct iwn_calib_info *calib_result; 5208 int idx, error; 5209 5210 for (idx = 0; idx < IWN_CALIB_NUM; idx++) { 5211 calib_result = &sc->calib_results[idx]; 5212 5213 /* No support for this type of calibration. */ 5214 if ((sc->calib_init & (1 << idx)) == 0) 5215 continue; 5216 5217 /* No calibration result available. */ 5218 if (calib_result->buf == NULL) 5219 continue; 5220 5221 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5222 "%s: send calibration result idx=%d, len=%d\n", 5223 __func__, idx, calib_result->len); 5224 5225 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, calib_result->buf, 5226 calib_result->len, 0); 5227 if (error != 0) { 5228 device_printf(sc->sc_dev, 5229 "%s: could not send calibration result " 5230 "idx=%d, error=%d\n", 5231 __func__, idx, error); 5232 return error; 5233 } 5234 } 5235 return 0; 5236 } 5237 5238 /* 5239 * Save calibration result at the given index. The index determines 5240 * in which order the results are sent to the runtime firmware. 
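 * Results are kept in sc->calib_results[], one slot per IWN_CALIB_IDX_* value, and replayed in slot order by iwn5000_send_calib_results(); any previous buffer for the slot is freed before the new one is copied in.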
5241 */ 5242 static int 5243 iwn5000_save_calib_result(struct iwn_softc *sc, struct iwn_phy_calib *calib, 5244 int len, int idx) 5245 { 5246 struct iwn_calib_info *calib_result = &sc->calib_results[idx]; 5247 5248 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5249 "%s: saving calibration result code=%d, idx=%d, len=%d\n", 5250 __func__, calib->code, idx, len); 5251 5252 if (calib_result->buf != NULL) 5253 free(calib_result->buf, M_DEVBUF); 5254 5255 calib_result->buf = malloc(len, M_DEVBUF, M_NOWAIT); 5256 if (calib_result->buf == NULL) { 5257 device_printf(sc->sc_dev, 5258 "%s: not enough memory for calibration result " 5259 "code=%d, len=%d\n", __func__, calib->code, len); 5260 return ENOMEM; 5261 } 5262 5263 calib_result->len = len; 5264 memcpy(calib_result->buf, calib, len); 5265 return 0; 5266 } 5267 5268 static void 5269 iwn5000_free_calib_results(struct iwn_softc *sc) 5270 { 5271 struct iwn_calib_info *calib_result; 5272 int idx; 5273 5274 for (idx = 0; idx < IWN_CALIB_NUM; idx++) { 5275 calib_result = &sc->calib_results[idx]; 5276 5277 if (calib_result->buf != NULL) 5278 free(calib_result->buf, M_DEVBUF); 5279 5280 calib_result->buf = NULL; 5281 calib_result->len = 0; 5282 } 5283 } 5284 5285 /* 5286 * Obtain the crystal calibration result from the EEPROM. 5287 */ 5288 static int 5289 iwn5000_chrystal_calib(struct iwn_softc *sc) 5290 { 5291 struct iwn5000_phy_calib_crystal cmd; 5292 uint32_t base, crystal; 5293 uint16_t val; 5294 5295 /* Read crystal calibration. */ 5296 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 5297 base = le16toh(val); 5298 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &crystal, 5299 sizeof(uint32_t)); 5300 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: crystal calibration=0x%08x\n", 5301 __func__, le32toh(crystal)); 5302 5303 memset(&cmd, 0, sizeof cmd); 5304 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5305 cmd.ngroups = 1; 5306 cmd.isvalid = 1; 5307 cmd.cap_pin[0] = le32toh(crystal) & 0xff; 5308 cmd.cap_pin[1] = (le32toh(crystal) >> 16) & 0xff; 5309 5310 return iwn5000_save_calib_result(sc, (struct iwn_phy_calib *)&cmd, 5311 sizeof cmd, IWN_CALIB_IDX_XTAL); 5312 } 5313 5314 /* 5315 * Query calibration results from the initialization firmware. We do this 5316 * only once at first boot. 5317 */ 5318 static int 5319 iwn5000_send_calib_query(struct iwn_softc *sc) 5320 { 5321 #define CALIB_INIT_CFG 0xffffffff; 5322 struct iwn5000_calib_config cmd; 5323 int error; 5324 5325 memset(&cmd, 0, sizeof cmd); 5326 cmd.ucode.once.enable = CALIB_INIT_CFG; 5327 cmd.ucode.once.start = CALIB_INIT_CFG; 5328 cmd.ucode.once.send = CALIB_INIT_CFG; 5329 cmd.ucode.flags = CALIB_INIT_CFG; 5330 5331 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: query calibration results\n", 5332 __func__); 5333 5334 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5335 if (error != 0) 5336 return error; 5337 5338 /* Wait at most two seconds for calibration to complete. */ 5339 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5340 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz); 5341 5342 return error; 5343 #undef CALIB_INIT_CFG 5344 } 5345 5346 /* 5347 * Process a CALIBRATION_RESULT notification sent by the initialization 5348 * firmware on response to a CMD_CALIB_CONFIG command. 
5349 */ 5350 static int 5351 iwn5000_rx_calib_result(struct iwn_softc *sc, struct iwn_rx_desc *desc, 5352 struct iwn_rx_data *data) 5353 { 5354 #define FRAME_SIZE_MASK 0x3fff 5355 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 5356 int len, idx; 5357 5358 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 5359 len = (le32toh(desc->len) & FRAME_SIZE_MASK); 5360 5361 /* Remove length field itself. */ 5362 len -= 4; 5363 5364 /* 5365 * Determine the order in which the results will be sent to the 5366 * runtime firmware. 5367 */ 5368 switch (calib->code) { 5369 case IWN5000_PHY_CALIB_DC: 5370 idx = IWN_CALIB_IDX_DC; 5371 break; 5372 case IWN5000_PHY_CALIB_LO: 5373 idx = IWN_CALIB_IDX_LO; 5374 break; 5375 case IWN5000_PHY_CALIB_TX_IQ: 5376 idx = IWN_CALIB_IDX_TX_IQ; 5377 break; 5378 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 5379 idx = IWN_CALIB_IDX_TX_IQ_PERIODIC; 5380 break; 5381 case IWN5000_PHY_CALIB_BASE_BAND: 5382 idx = IWN_CALIB_IDX_BASE_BAND; 5383 break; 5384 default: 5385 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5386 "%s: unknown calibration code=%d\n", __func__, calib->code); 5387 return EINVAL; 5388 } 5389 return iwn5000_save_calib_result(sc, calib, len, idx); 5390 #undef FRAME_SIZE_MASK 5391 } 5392 5393 static int 5394 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5395 { 5396 struct iwn5000_wimax_coex wimax; 5397 5398 #ifdef notyet 5399 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5400 /* Enable WiMAX coexistence for combo adapters. */ 5401 wimax.flags = 5402 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5403 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5404 IWN_WIMAX_COEX_STA_TABLE_VALID | 5405 IWN_WIMAX_COEX_ENABLE; 5406 memcpy(wimax.events, iwn6050_wimax_events, 5407 sizeof iwn6050_wimax_events); 5408 } else 5409 #endif 5410 { 5411 /* Disable WiMAX coexistence. */ 5412 wimax.flags = 0; 5413 memset(wimax.events, 0, sizeof wimax.events); 5414 } 5415 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 5416 __func__); 5417 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5418 } 5419 5420 /* 5421 * This function is called after the runtime firmware notifies us of its 5422 * readiness (called in a process context.) 5423 */ 5424 static int 5425 iwn4965_post_alive(struct iwn_softc *sc) 5426 { 5427 int error, qid; 5428 5429 if ((error = iwn_nic_lock(sc)) != 0) 5430 return error; 5431 5432 /* Clear TX scheduler state in SRAM. */ 5433 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5434 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5435 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5436 5437 /* Set physical address of TX scheduler rings (1KB aligned.) */ 5438 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5439 5440 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5441 5442 /* Disable chain mode for all our 16 queues. */ 5443 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5444 5445 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5446 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5447 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5448 5449 /* Set scheduler window size. */ 5450 iwn_mem_write(sc, sc->sched_base + 5451 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5452 /* Set scheduler frame limit. */ 5453 iwn_mem_write(sc, sc->sched_base + 5454 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5455 IWN_SCHED_LIMIT << 16); 5456 } 5457 5458 /* Enable interrupts for all our 16 queues. */ 5459 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5460 /* Identify TX FIFO rings (0-7).
*/ 5461 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5462 5463 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5464 for (qid = 0; qid < 7; qid++) { 5465 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5466 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5467 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5468 } 5469 iwn_nic_unlock(sc); 5470 return 0; 5471 } 5472 5473 /* 5474 * This function is called after the initialization or runtime firmware 5475 * notifies us of its readiness (called in a process context.) 5476 */ 5477 static int 5478 iwn5000_post_alive(struct iwn_softc *sc) 5479 { 5480 int error, qid; 5481 5482 /* Switch to using ICT interrupt mode. */ 5483 iwn5000_ict_reset(sc); 5484 5485 error = iwn_nic_lock(sc); 5486 if (error != 0) 5487 return error; 5488 5489 /* Clear TX scheduler state in SRAM. */ 5490 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5491 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5492 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5493 5494 /* Set physical address of TX scheduler rings (1KB aligned.) */ 5495 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5496 5497 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5498 5499 /* Enable chain mode for all queues, except command queue. */ 5500 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5501 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5502 5503 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5504 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5505 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5506 5507 iwn_mem_write(sc, sc->sched_base + 5508 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5509 /* Set scheduler window size and frame limit. */ 5510 iwn_mem_write(sc, sc->sched_base + 5511 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5512 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5513 } 5514 5515 /* Enable interrupts for all our 20 queues. */ 5516 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5517 /* Identify TX FIFO rings (0-7). */ 5518 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5519 5520 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5521 for (qid = 0; qid < 7; qid++) { 5522 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5523 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5524 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5525 } 5526 iwn_nic_unlock(sc); 5527 5528 /* Configure WiMAX coexistence for combo adapters. */ 5529 error = iwn5000_send_wimax_coex(sc); 5530 if (error != 0) { 5531 device_printf(sc->sc_dev, 5532 "%s: could not configure WiMAX coexistence, error %d\n", 5533 __func__, error); 5534 return error; 5535 } 5536 5537 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5538 /* 5539 * Start calibration by setting and sending the crystal 5540 * calibration first; this must be done before we are able 5541 * to query the other calibration results. 5542 */ 5543 error = iwn5000_chrystal_calib(sc); 5544 if (error != 0) { 5545 device_printf(sc->sc_dev, 5546 "%s: could not set crystal calibration, " 5547 "error=%d\n", __func__, error); 5548 return error; 5549 } 5550 error = iwn5000_send_calib_results(sc); 5551 if (error != 0) { 5552 device_printf(sc->sc_dev, 5553 "%s: could not send crystal calibration, " 5554 "error=%d\n", __func__, error); 5555 return error; 5556 } 5557 5558 /* 5559 * Query other calibration results from the initialization 5560 * firmware.
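 * iwn5000_send_calib_query() issues a CMD_CALIB_CONFIG command and then
 * sleeps (for at most two seconds) while the CALIBRATION_RESULT
 * notifications are processed.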
5561 */ 5562 error = iwn5000_send_calib_query(sc); 5563 if (error != 0) { 5564 device_printf(sc->sc_dev, 5565 "%s: could not query calibration, error=%d\n", 5566 __func__, error); 5567 return error; 5568 } 5569 5570 /* 5571 * We have the calibration results now, reboot with the 5572 * runtime firmware (call ourselves recursively!) 5573 */ 5574 iwn_hw_stop(sc); 5575 error = iwn_hw_init(sc); 5576 } else { 5577 /* 5578 * Send calibration results obtained from the initialization 5579 * firmware to the runtime firmware. 5580 */ 5581 error = iwn5000_send_calib_results(sc); 5582 } 5583 return error; 5584 } 5585 5586 /* 5587 * The firmware boot code is small and is intended to be copied directly into 5588 * the NIC internal memory (no DMA transfer.) 5589 */ 5590 static int 5591 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5592 { 5593 int error, ntries; 5594 5595 size /= sizeof (uint32_t); 5596 5597 error = iwn_nic_lock(sc); 5598 if (error != 0) 5599 return error; 5600 5601 /* Copy microcode image into NIC memory. */ 5602 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5603 (const uint32_t *)ucode, size); 5604 5605 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5606 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5607 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5608 5609 /* Start boot load now. */ 5610 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5611 5612 /* Wait for transfer to complete. */ 5613 for (ntries = 0; ntries < 1000; ntries++) { 5614 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5615 IWN_BSM_WR_CTRL_START)) 5616 break; 5617 DELAY(10); 5618 } 5619 if (ntries == 1000) { 5620 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 5621 __func__); 5622 iwn_nic_unlock(sc); 5623 return ETIMEDOUT; 5624 } 5625 5626 /* Enable boot after power up. */ 5627 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5628 5629 iwn_nic_unlock(sc); 5630 return 0; 5631 } 5632 5633 static int 5634 iwn4965_load_firmware(struct iwn_softc *sc) 5635 { 5636 struct iwn_fw_info *fw = &sc->fw; 5637 struct iwn_dma_info *dma = &sc->fw_dma; 5638 int error; 5639 5640 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5641 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5642 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5643 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5644 fw->init.text, fw->init.textsz); 5645 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5646 5647 /* Tell adapter where to find initialization sections. */ 5648 error = iwn_nic_lock(sc); 5649 if (error != 0) 5650 return error; 5651 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5652 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5653 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5654 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5655 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5656 iwn_nic_unlock(sc); 5657 5658 /* Load firmware boot code. */ 5659 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5660 if (error != 0) { 5661 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 5662 __func__); 5663 return error; 5664 } 5665 /* Now press "execute". */ 5666 IWN_WRITE(sc, IWN_RESET, 0); 5667 5668 /* Wait at most one second for first alive notification. 
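 * The msleep() below is expected to be ended early by a wakeup() from the
 * interrupt path once the firmware reports that it is alive.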
*/ 5669 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 5670 if (error) { 5671 device_printf(sc->sc_dev, 5672 "%s: timeout waiting for adapter to initialize, error %d\n", 5673 __func__, error); 5674 return error; 5675 } 5676 5677 /* Retrieve current temperature for initial TX power calibration. */ 5678 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5679 sc->temp = iwn4965_get_temperature(sc); 5680 5681 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5682 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5683 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5684 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5685 fw->main.text, fw->main.textsz); 5686 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5687 5688 /* Tell adapter where to find runtime sections. */ 5689 error = iwn_nic_lock(sc); 5690 if (error != 0) 5691 return error; 5692 5693 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5694 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5695 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5696 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5697 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5698 IWN_FW_UPDATED | fw->main.textsz); 5699 iwn_nic_unlock(sc); 5700 5701 return 0; 5702 } 5703 5704 static int 5705 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5706 const uint8_t *section, int size) 5707 { 5708 struct iwn_dma_info *dma = &sc->fw_dma; 5709 int error; 5710 5711 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5712 memcpy(dma->vaddr, section, size); 5713 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5714 5715 error = iwn_nic_lock(sc); 5716 if (error != 0) 5717 return error; 5718 5719 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5720 IWN_FH_TX_CONFIG_DMA_PAUSE); 5721 5722 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5723 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5724 IWN_LOADDR(dma->paddr)); 5725 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5726 IWN_HIADDR(dma->paddr) << 28 | size); 5727 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5728 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5729 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5730 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5731 5732 /* Kick Flow Handler to start DMA transfer. */ 5733 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5734 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5735 5736 iwn_nic_unlock(sc); 5737 5738 /* Wait at most five seconds for FH DMA transfer to complete. */ 5739 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); 5740 } 5741 5742 static int 5743 iwn5000_load_firmware(struct iwn_softc *sc) 5744 { 5745 struct iwn_fw_part *fw; 5746 int error; 5747 5748 /* Load the initialization firmware on first boot only. */ 5749 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5750 &sc->fw.main : &sc->fw.init; 5751 5752 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5753 fw->text, fw->textsz); 5754 if (error != 0) { 5755 device_printf(sc->sc_dev, 5756 "%s: could not load firmware %s section, error %d\n", 5757 __func__, ".text", error); 5758 return error; 5759 } 5760 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5761 fw->data, fw->datasz); 5762 if (error != 0) { 5763 device_printf(sc->sc_dev, 5764 "%s: could not load firmware %s section, error %d\n", 5765 __func__, ".data", error); 5766 return error; 5767 } 5768 5769 /* Now press "execute".
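 * Writing 0 to the reset register releases the on-board processor so it
 * starts executing the sections loaded above.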
*/ 5770 IWN_WRITE(sc, IWN_RESET, 0); 5771 return 0; 5772 } 5773 5774 /* 5775 * Extract text and data sections from a legacy firmware image. 5776 */ 5777 static int 5778 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5779 { 5780 const uint32_t *ptr; 5781 size_t hdrlen = 24; 5782 uint32_t rev; 5783 5784 ptr = (const uint32_t *)sc->fw_fp->data; 5785 rev = le32toh(*ptr++); 5786 5787 /* Check firmware API version. */ 5788 if (IWN_FW_API(rev) <= 1) { 5789 device_printf(sc->sc_dev, 5790 "%s: bad firmware, need API version >=2\n", __func__); 5791 return EINVAL; 5792 } 5793 if (IWN_FW_API(rev) >= 3) { 5794 /* Skip build number (version 2 header). */ 5795 hdrlen += 4; 5796 ptr++; 5797 } 5798 if (fw->size < hdrlen) { 5799 device_printf(sc->sc_dev, 5800 "%s: firmware file too short: %zu bytes\n", 5801 __func__, fw->size); 5802 return EINVAL; 5803 } 5804 fw->main.textsz = le32toh(*ptr++); 5805 fw->main.datasz = le32toh(*ptr++); 5806 fw->init.textsz = le32toh(*ptr++); 5807 fw->init.datasz = le32toh(*ptr++); 5808 fw->boot.textsz = le32toh(*ptr++); 5809 5810 /* Check that all firmware sections fit. */ 5811 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5812 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5813 device_printf(sc->sc_dev, 5814 "%s: firmware file too short: %zu bytes\n", 5815 __func__, fw->size); 5816 return EINVAL; 5817 } 5818 5819 /* Get pointers to firmware sections. */ 5820 fw->main.text = (const uint8_t *)ptr; 5821 fw->main.data = fw->main.text + fw->main.textsz; 5822 fw->init.text = fw->main.data + fw->main.datasz; 5823 fw->init.data = fw->init.text + fw->init.textsz; 5824 fw->boot.text = fw->init.data + fw->init.datasz; 5825 5826 return 0; 5827 } 5828 5829 /* 5830 * Extract text and data sections from a TLV firmware image. 5831 */ 5832 int 5833 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5834 uint16_t alt) 5835 { 5836 const struct iwn_fw_tlv_hdr *hdr; 5837 const struct iwn_fw_tlv *tlv; 5838 const uint8_t *ptr, *end; 5839 uint64_t altmask; 5840 uint32_t len; 5841 5842 if (fw->size < sizeof (*hdr)) { 5843 device_printf(sc->sc_dev, 5844 "%s: firmware file too short: %zu bytes\n", 5845 __func__, fw->size); 5846 return EINVAL; 5847 } 5848 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5849 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5850 device_printf(sc->sc_dev, 5851 "%s: bad firmware file signature 0x%08x\n", 5852 __func__, le32toh(hdr->signature)); 5853 return EINVAL; 5854 } 5855 5856 /* 5857 * Select the closest supported alternative that is less than 5858 * or equal to the specified one. 5859 */ 5860 altmask = le64toh(hdr->altmask); 5861 while (alt > 0 && !(altmask & (1ULL << alt))) 5862 alt--; /* Downgrade. */ 5863 5864 ptr = (const uint8_t *)(hdr + 1); 5865 end = (const uint8_t *)(fw->data + fw->size); 5866 5867 /* Parse type-length-value fields. */ 5868 while (ptr + sizeof (*tlv) <= end) { 5869 tlv = (const struct iwn_fw_tlv *)ptr; 5870 len = le32toh(tlv->len); 5871 5872 ptr += sizeof (*tlv); 5873 if (ptr + len > end) { 5874 device_printf(sc->sc_dev, 5875 "%s: firmware file too short: %zu bytes\n", 5876 __func__, fw->size); 5877 return EINVAL; 5878 } 5879 /* Skip other alternatives. 
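 * A TLV whose alt field is zero applies to every alternative; any other
 * value is only honored when it matches the alternative selected above.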
*/ 5880 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5881 goto next; 5882 5883 switch (le16toh(tlv->type)) { 5884 case IWN_FW_TLV_MAIN_TEXT: 5885 fw->main.text = ptr; 5886 fw->main.textsz = len; 5887 break; 5888 case IWN_FW_TLV_MAIN_DATA: 5889 fw->main.data = ptr; 5890 fw->main.datasz = len; 5891 break; 5892 case IWN_FW_TLV_INIT_TEXT: 5893 fw->init.text = ptr; 5894 fw->init.textsz = len; 5895 break; 5896 case IWN_FW_TLV_INIT_DATA: 5897 fw->init.data = ptr; 5898 fw->init.datasz = len; 5899 break; 5900 case IWN_FW_TLV_BOOT_TEXT: 5901 fw->boot.text = ptr; 5902 fw->boot.textsz = len; 5903 break; 5904 default: 5905 DPRINTF(sc, IWN_DEBUG_RESET, 5906 "%s: TLV type %d not handled\n", 5907 __func__, le16toh(tlv->type)); 5908 break; 5909 } 5910 next: /* TLV fields are 32-bit aligned. */ 5911 ptr += (len + 3) & ~3; 5912 } 5913 return 0; 5914 } 5915 5916 static int 5917 iwn_read_firmware(struct iwn_softc *sc) 5918 { 5919 const struct iwn_hal *hal = sc->sc_hal; 5920 struct iwn_fw_info *fw = &sc->fw; 5921 int error; 5922 5923 IWN_UNLOCK(sc); 5924 5925 memset(fw, 0, sizeof (*fw)); 5926 5927 /* Read firmware image from filesystem. */ 5928 sc->fw_fp = firmware_get(sc->fwname); 5929 if (sc->fw_fp == NULL) { 5930 device_printf(sc->sc_dev, 5931 "%s: could not load firmware image \"%s\"\n", __func__, 5932 sc->fwname); 5933 IWN_LOCK(sc); 5934 return EINVAL; 5935 } 5936 IWN_LOCK(sc); 5937 5938 fw->size = sc->fw_fp->datasize; 5939 fw->data = (const uint8_t *)sc->fw_fp->data; 5940 if (fw->size < sizeof (uint32_t)) { 5941 device_printf(sc->sc_dev, 5942 "%s: firmware file too short: %zu bytes\n", 5943 __func__, fw->size); 5944 return EINVAL; 5945 } 5946 5947 /* Retrieve text and data sections. */ 5948 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5949 error = iwn_read_firmware_leg(sc, fw); 5950 else 5951 error = iwn_read_firmware_tlv(sc, fw, 1); 5952 if (error != 0) { 5953 device_printf(sc->sc_dev, 5954 "%s: could not read firmware sections\n", __func__); 5955 return error; 5956 } 5957 5958 /* Make sure text and data sections fit in hardware memory. */ 5959 if (fw->main.textsz > hal->fw_text_maxsz || 5960 fw->main.datasz > hal->fw_data_maxsz || 5961 fw->init.textsz > hal->fw_text_maxsz || 5962 fw->init.datasz > hal->fw_data_maxsz || 5963 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5964 (fw->boot.textsz & 3) != 0) { 5965 device_printf(sc->sc_dev, 5966 "%s: firmware sections too large\n", __func__); 5967 return EINVAL; 5968 } 5969 5970 /* We can proceed with loading the firmware. */ 5971 return 0; 5972 } 5973 5974 static int 5975 iwn_clock_wait(struct iwn_softc *sc) 5976 { 5977 int ntries; 5978 5979 /* Set "initialization complete" bit. */ 5980 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5981 5982 /* Wait for clock stabilization. */ 5983 for (ntries = 0; ntries < 2500; ntries++) { 5984 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5985 return 0; 5986 DELAY(10); 5987 } 5988 device_printf(sc->sc_dev, 5989 "%s: timeout waiting for clock stabilization\n", __func__); 5990 return ETIMEDOUT; 5991 } 5992 5993 static int 5994 iwn_apm_init(struct iwn_softc *sc) 5995 { 5996 uint32_t tmp; 5997 int error; 5998 5999 /* Disable L0s exit timer (NMI bug workaround.) */ 6000 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6001 /* Don't wait for ICH L0s (ICH bug workaround.) */ 6002 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6003 6004 /* Set FH wait threshold to max (HW bug under stress workaround.)
*/ 6005 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6006 6007 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6008 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6009 6010 /* Retrieve PCIe Active State Power Management (ASPM). */ 6011 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 6012 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6013 if (tmp & 0x02) /* L1 Entry enabled. */ 6014 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6015 else 6016 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6017 6018 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6019 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6020 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6021 6022 /* Wait for clock stabilization before accessing prph. */ 6023 error = iwn_clock_wait(sc); 6024 if (error != 0) 6025 return error; 6026 6027 error = iwn_nic_lock(sc); 6028 if (error != 0) 6029 return error; 6030 6031 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6032 /* Enable DMA and BSM (Bootstrap State Machine.) */ 6033 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6034 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6035 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6036 } else { 6037 /* Enable DMA. */ 6038 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6039 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6040 } 6041 DELAY(20); 6042 6043 /* Disable L1-Active. */ 6044 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6045 iwn_nic_unlock(sc); 6046 6047 return 0; 6048 } 6049 6050 static void 6051 iwn_apm_stop_master(struct iwn_softc *sc) 6052 { 6053 int ntries; 6054 6055 /* Stop busmaster DMA activity. */ 6056 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6057 for (ntries = 0; ntries < 100; ntries++) { 6058 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6059 return; 6060 DELAY(10); 6061 } 6062 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 6063 __func__); 6064 } 6065 6066 static void 6067 iwn_apm_stop(struct iwn_softc *sc) 6068 { 6069 iwn_apm_stop_master(sc); 6070 6071 /* Reset the entire device. */ 6072 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6073 DELAY(10); 6074 /* Clear "initialization complete" bit. */ 6075 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6076 } 6077 6078 static int 6079 iwn4965_nic_config(struct iwn_softc *sc) 6080 { 6081 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6082 /* 6083 * I don't believe this to be correct but this is what the 6084 * vendor driver is doing. Probably the bits should not be 6085 * shifted in IWN_RFCFG_*. 6086 */ 6087 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6088 IWN_RFCFG_TYPE(sc->rfcfg) | 6089 IWN_RFCFG_STEP(sc->rfcfg) | 6090 IWN_RFCFG_DASH(sc->rfcfg)); 6091 } 6092 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6093 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6094 return 0; 6095 } 6096 6097 static int 6098 iwn5000_nic_config(struct iwn_softc *sc) 6099 { 6100 uint32_t tmp; 6101 int error; 6102 6103 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6104 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6105 IWN_RFCFG_TYPE(sc->rfcfg) | 6106 IWN_RFCFG_STEP(sc->rfcfg) | 6107 IWN_RFCFG_DASH(sc->rfcfg)); 6108 } 6109 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6110 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6111 6112 error = iwn_nic_lock(sc); 6113 if (error != 0) 6114 return error; 6115 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6116 6117 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6118 /* 6119 * Select first Switching Voltage Regulator (1.32V) to 6120 * solve a stability issue related to noisy DC2DC line 6121 * in the silicon of 1000 Series. 
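 * The read-modify-write below clears the voltage field of the
 * APMG_DIGITAL_SVR register and sets it to the 1.32V encoding.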
6122 */ 6123 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6124 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6125 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6126 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6127 } 6128 iwn_nic_unlock(sc); 6129 6130 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6131 /* Use internal power amplifier only. */ 6132 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6133 } 6134 if (sc->hw_type == IWN_HW_REV_TYPE_6050 && sc->calib_ver >= 6) { 6135 /* Indicate that ROM calibration version is >=6. */ 6136 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6137 } 6138 return 0; 6139 } 6140 6141 /* 6142 * Take NIC ownership over Intel Active Management Technology (AMT). 6143 */ 6144 static int 6145 iwn_hw_prepare(struct iwn_softc *sc) 6146 { 6147 int ntries; 6148 6149 /* Check if hardware is ready. */ 6150 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6151 for (ntries = 0; ntries < 5; ntries++) { 6152 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6153 IWN_HW_IF_CONFIG_NIC_READY) 6154 return 0; 6155 DELAY(10); 6156 } 6157 6158 /* Hardware not ready, force into ready state. */ 6159 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6160 for (ntries = 0; ntries < 15000; ntries++) { 6161 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6162 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6163 break; 6164 DELAY(10); 6165 } 6166 if (ntries == 15000) 6167 return ETIMEDOUT; 6168 6169 /* Hardware should be ready now. */ 6170 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6171 for (ntries = 0; ntries < 5; ntries++) { 6172 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6173 IWN_HW_IF_CONFIG_NIC_READY) 6174 return 0; 6175 DELAY(10); 6176 } 6177 return ETIMEDOUT; 6178 } 6179 6180 static int 6181 iwn_hw_init(struct iwn_softc *sc) 6182 { 6183 const struct iwn_hal *hal = sc->sc_hal; 6184 int error, chnl, qid; 6185 6186 /* Clear pending interrupts. */ 6187 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6188 6189 error = iwn_apm_init(sc); 6190 if (error != 0) { 6191 device_printf(sc->sc_dev, 6192 "%s: could not power ON adapter, error %d\n", 6193 __func__, error); 6194 return error; 6195 } 6196 6197 /* Select VMAIN power source. */ 6198 error = iwn_nic_lock(sc); 6199 if (error != 0) 6200 return error; 6201 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 6202 iwn_nic_unlock(sc); 6203 6204 /* Perform adapter-specific initialization. */ 6205 error = hal->nic_config(sc); 6206 if (error != 0) 6207 return error; 6208 6209 /* Initialize RX ring. */ 6210 error = iwn_nic_lock(sc); 6211 if (error != 0) 6212 return error; 6213 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 6214 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 6215 /* Set physical address of RX ring (256-byte aligned.) */ 6216 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 6217 /* Set physical address of RX status (16-byte aligned.) */ 6218 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 6219 /* Enable RX. */ 6220 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 6221 IWN_FH_RX_CONFIG_ENA | 6222 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 6223 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 6224 IWN_FH_RX_CONFIG_SINGLE_FRAME | 6225 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 6226 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 6227 iwn_nic_unlock(sc); 6228 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 6229 6230 error = iwn_nic_lock(sc); 6231 if (error != 0) 6232 return error; 6233 6234 /* Initialize TX scheduler. 
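 * Writing 0 to the scheduler TXFACT register presumably leaves every
 * transmit FIFO disabled until the post-alive callback sets things up.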
*/ 6235 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 6236 6237 /* Set physical address of "keep warm" page (16-byte aligned.) */ 6238 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 6239 6240 /* Initialize TX rings. */ 6241 for (qid = 0; qid < hal->ntxqs; qid++) { 6242 struct iwn_tx_ring *txq = &sc->txq[qid]; 6243 6244 /* Set physical address of TX ring (256-byte aligned.) */ 6245 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 6246 txq->desc_dma.paddr >> 8); 6247 } 6248 iwn_nic_unlock(sc); 6249 6250 /* Enable DMA channels. */ 6251 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 6252 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 6253 IWN_FH_TX_CONFIG_DMA_ENA | 6254 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 6255 } 6256 6257 /* Clear "radio off" and "commands blocked" bits. */ 6258 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6259 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 6260 6261 /* Clear pending interrupts. */ 6262 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6263 /* Enable interrupt coalescing. */ 6264 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 6265 /* Enable interrupts. */ 6266 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6267 6268 /* _Really_ make sure "radio off" bit is cleared! */ 6269 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6270 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6271 6272 error = hal->load_firmware(sc); 6273 if (error != 0) { 6274 device_printf(sc->sc_dev, 6275 "%s: could not load firmware, error %d\n", 6276 __func__, error); 6277 return error; 6278 } 6279 /* Wait at most one second for firmware alive notification. */ 6280 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 6281 if (error != 0) { 6282 device_printf(sc->sc_dev, 6283 "%s: timeout waiting for adapter to initialize, error %d\n", 6284 __func__, error); 6285 return error; 6286 } 6287 /* Do post-firmware initialization. */ 6288 return hal->post_alive(sc); 6289 } 6290 6291 static void 6292 iwn_hw_stop(struct iwn_softc *sc) 6293 { 6294 const struct iwn_hal *hal = sc->sc_hal; 6295 uint32_t tmp; 6296 int chnl, qid, ntries; 6297 6298 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 6299 6300 /* Disable interrupts. */ 6301 IWN_WRITE(sc, IWN_INT_MASK, 0); 6302 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6303 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 6304 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6305 6306 /* Make sure we no longer hold the NIC lock. */ 6307 iwn_nic_unlock(sc); 6308 6309 /* Stop TX scheduler. */ 6310 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 6311 6312 /* Stop all DMA channels. */ 6313 if (iwn_nic_lock(sc) == 0) { 6314 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 6315 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 6316 for (ntries = 0; ntries < 200; ntries++) { 6317 tmp = IWN_READ(sc, IWN_FH_TX_STATUS); 6318 if ((tmp & IWN_FH_TX_STATUS_IDLE(chnl)) == 6319 IWN_FH_TX_STATUS_IDLE(chnl)) 6320 break; 6321 DELAY(10); 6322 } 6323 } 6324 iwn_nic_unlock(sc); 6325 } 6326 6327 /* Stop RX ring. */ 6328 iwn_reset_rx_ring(sc, &sc->rxq); 6329 6330 /* Reset all TX rings. */ 6331 for (qid = 0; qid < hal->ntxqs; qid++) 6332 iwn_reset_tx_ring(sc, &sc->txq[qid]); 6333 6334 if (iwn_nic_lock(sc) == 0) { 6335 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 6336 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6337 iwn_nic_unlock(sc); 6338 } 6339 DELAY(5); 6340 6341 /* Power OFF adapter. 
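 * iwn_apm_stop() halts bus-master DMA, asserts a software reset and
 * clears the "initialization complete" bit.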
*/ 6342 iwn_apm_stop(sc); 6343 } 6344 6345 static void 6346 iwn_init_locked(struct iwn_softc *sc) 6347 { 6348 struct ifnet *ifp = sc->sc_ifp; 6349 int error; 6350 6351 IWN_LOCK_ASSERT(sc); 6352 6353 error = iwn_hw_prepare(sc); 6354 if (error != 0) { 6355 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 6356 __func__, error); 6357 goto fail; 6358 } 6359 6360 /* Initialize interrupt mask to default value. */ 6361 sc->int_mask = IWN_INT_MASK_DEF; 6362 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6363 6364 /* Check that the radio is not disabled by hardware switch. */ 6365 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 6366 device_printf(sc->sc_dev, 6367 "radio is disabled by hardware switch\n"); 6368 6369 /* Enable interrupts to get RF toggle notifications. */ 6370 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6371 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6372 return; 6373 } 6374 6375 /* Read firmware images from the filesystem. */ 6376 error = iwn_read_firmware(sc); 6377 if (error != 0) { 6378 device_printf(sc->sc_dev, 6379 "%s: could not read firmware, error %d\n", 6380 __func__, error); 6381 goto fail; 6382 } 6383 6384 /* Initialize hardware and upload firmware. */ 6385 error = iwn_hw_init(sc); 6386 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 6387 sc->fw_fp = NULL; 6388 if (error != 0) { 6389 device_printf(sc->sc_dev, 6390 "%s: could not initialize hardware, error %d\n", 6391 __func__, error); 6392 goto fail; 6393 } 6394 6395 /* Configure adapter now that it is ready. */ 6396 error = iwn_config(sc); 6397 if (error != 0) { 6398 device_printf(sc->sc_dev, 6399 "%s: could not configure device, error %d\n", 6400 __func__, error); 6401 goto fail; 6402 } 6403 6404 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 6405 ifp->if_drv_flags |= IFF_DRV_RUNNING; 6406 6407 return; 6408 6409 fail: 6410 iwn_stop_locked(sc); 6411 } 6412 6413 static void 6414 iwn_init(void *arg) 6415 { 6416 struct iwn_softc *sc = arg; 6417 struct ifnet *ifp = sc->sc_ifp; 6418 struct ieee80211com *ic = ifp->if_l2com; 6419 6420 IWN_LOCK(sc); 6421 iwn_init_locked(sc); 6422 IWN_UNLOCK(sc); 6423 6424 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 6425 ieee80211_start_all(ic); 6426 } 6427 6428 static void 6429 iwn_stop_locked(struct iwn_softc *sc) 6430 { 6431 struct ifnet *ifp = sc->sc_ifp; 6432 6433 IWN_LOCK_ASSERT(sc); 6434 6435 sc->sc_tx_timer = 0; 6436 callout_stop(&sc->sc_timer_to); 6437 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 6438 6439 /* Power OFF hardware. */ 6440 iwn_hw_stop(sc); 6441 } 6442 6443 static void 6444 iwn_stop(struct iwn_softc *sc) 6445 { 6446 IWN_LOCK(sc); 6447 iwn_stop_locked(sc); 6448 IWN_UNLOCK(sc); 6449 } 6450 6451 /* 6452 * Callback from net80211 to start a scan. 6453 */ 6454 static void 6455 iwn_scan_start(struct ieee80211com *ic) 6456 { 6457 struct ifnet *ifp = ic->ic_ifp; 6458 struct iwn_softc *sc = ifp->if_softc; 6459 6460 IWN_LOCK(sc); 6461 /* make the link LED blink while we're scanning */ 6462 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 6463 IWN_UNLOCK(sc); 6464 } 6465 6466 /* 6467 * Callback from net80211 to terminate a scan.
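 * If the VAP is associated, the link LED is switched back to steady on.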
6468 */ 6469 static void 6470 iwn_scan_end(struct ieee80211com *ic) 6471 { 6472 struct ifnet *ifp = ic->ic_ifp; 6473 struct iwn_softc *sc = ifp->if_softc; 6474 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6475 6476 IWN_LOCK(sc); 6477 if (vap->iv_state == IEEE80211_S_RUN) { 6478 /* Set link LED to ON status if we are associated */ 6479 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 6480 } 6481 IWN_UNLOCK(sc); 6482 } 6483 6484 /* 6485 * Callback from net80211 to force a channel change. 6486 */ 6487 static void 6488 iwn_set_channel(struct ieee80211com *ic) 6489 { 6490 const struct ieee80211_channel *c = ic->ic_curchan; 6491 struct ifnet *ifp = ic->ic_ifp; 6492 struct iwn_softc *sc = ifp->if_softc; 6493 6494 IWN_LOCK(sc); 6495 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 6496 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 6497 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 6498 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 6499 IWN_UNLOCK(sc); 6500 } 6501 6502 /* 6503 * Callback from net80211 to start scanning of the current channel. 6504 */ 6505 static void 6506 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 6507 { 6508 struct ieee80211vap *vap = ss->ss_vap; 6509 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 6510 int error; 6511 6512 IWN_LOCK(sc); 6513 error = iwn_scan(sc); 6514 IWN_UNLOCK(sc); 6515 if (error != 0) 6516 ieee80211_cancel_scan(vap); 6517 } 6518 6519 /* 6520 * Callback from net80211 to handle the minimum dwell time being met. 6521 * The intent is to terminate the scan but we just let the firmware 6522 * notify us when it's finished as we have no safe way to abort it. 6523 */ 6524 static void 6525 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 6526 { 6527 /* NB: don't try to abort scan; wait for firmware to finish */ 6528 } 6529 6530 static struct iwn_eeprom_chan * 6531 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) 6532 { 6533 int i, j; 6534 6535 for (j = 0; j < 7; j++) { 6536 for (i = 0; i < iwn_bands[j].nchan; i++) { 6537 if (iwn_bands[j].chan[i] == c->ic_ieee) 6538 return &sc->eeprom_channels[j][i]; 6539 } 6540 } 6541 6542 return NULL; 6543 } 6544 6545 /* 6546 * Enforce flags read from EEPROM. 
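 * Every channel proposed by net80211 is looked up in the EEPROM channel
 * list; channels not found there are rejected with EINVAL.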
6547 */ 6548 static int 6549 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 6550 int nchan, struct ieee80211_channel chans[]) 6551 { 6552 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6553 int i; 6554 6555 for (i = 0; i < nchan; i++) { 6556 struct ieee80211_channel *c = &chans[i]; 6557 struct iwn_eeprom_chan *channel; 6558 6559 channel = iwn_find_eeprom_channel(sc, c); 6560 if (channel == NULL) { 6561 if_printf(ic->ic_ifp, 6562 "%s: invalid channel %u freq %u/0x%x\n", 6563 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 6564 return EINVAL; 6565 } 6566 c->ic_flags |= iwn_eeprom_channel_flags(channel); 6567 } 6568 6569 return 0; 6570 } 6571 6572 static void 6573 iwn_hw_reset(void *arg0, int pending) 6574 { 6575 struct iwn_softc *sc = arg0; 6576 struct ifnet *ifp = sc->sc_ifp; 6577 struct ieee80211com *ic = ifp->if_l2com; 6578 6579 iwn_stop(sc); 6580 iwn_init(sc); 6581 ieee80211_notify_radio(ic, 1); 6582 } 6583 6584 static void 6585 iwn_radio_on(void *arg0, int pending) 6586 { 6587 struct iwn_softc *sc = arg0; 6588 struct ifnet *ifp = sc->sc_ifp; 6589 struct ieee80211com *ic = ifp->if_l2com; 6590 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6591 6592 if (vap != NULL) { 6593 iwn_init(sc); 6594 ieee80211_init(vap); 6595 } 6596 } 6597 6598 static void 6599 iwn_radio_off(void *arg0, int pending) 6600 { 6601 struct iwn_softc *sc = arg0; 6602 struct ifnet *ifp = sc->sc_ifp; 6603 struct ieee80211com *ic = ifp->if_l2com; 6604 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6605 6606 iwn_stop(sc); 6607 if (vap != NULL) 6608 ieee80211_stop(vap); 6609 6610 /* Enable interrupts to get RF toggle notification. */ 6611 IWN_LOCK(sc); 6612 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6613 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6614 IWN_UNLOCK(sc); 6615 } 6616 6617 static void 6618 iwn_sysctlattach(struct iwn_softc *sc) 6619 { 6620 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 6621 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 6622 6623 #ifdef IWN_DEBUG 6624 sc->sc_debug = 0; 6625 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6626 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); 6627 #endif 6628 } 6629 6630 static int 6631 iwn_shutdown(device_t dev) 6632 { 6633 struct iwn_softc *sc = device_get_softc(dev); 6634 6635 iwn_stop(sc); 6636 return 0; 6637 } 6638 6639 static int 6640 iwn_suspend(device_t dev) 6641 { 6642 struct iwn_softc *sc = device_get_softc(dev); 6643 struct ifnet *ifp = sc->sc_ifp; 6644 struct ieee80211com *ic = ifp->if_l2com; 6645 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6646 6647 iwn_stop(sc); 6648 if (vap != NULL) 6649 ieee80211_stop(vap); 6650 return 0; 6651 } 6652 6653 static int 6654 iwn_resume(device_t dev) 6655 { 6656 struct iwn_softc *sc = device_get_softc(dev); 6657 struct ifnet *ifp = sc->sc_ifp; 6658 struct ieee80211com *ic = ifp->if_l2com; 6659 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6660 6661 /* Clear device-specific "PCI retry timeout" register (41h). 
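 * (Presumably a non-zero retry timeout can interfere with power-state
 * transitions after resume, which is why Intel's drivers clear it on wakeup.)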
*/ 6662 pci_write_config(dev, 0x41, 0, 1); 6663 6664 if (ifp->if_flags & IFF_UP) { 6665 iwn_init(sc); 6666 if (vap != NULL) 6667 ieee80211_init(vap); 6668 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 6669 iwn_start(ifp); 6670 } 6671 return 0; 6672 } 6673 6674 #ifdef IWN_DEBUG 6675 static const char * 6676 iwn_intr_str(uint8_t cmd) 6677 { 6678 switch (cmd) { 6679 /* Notifications */ 6680 case IWN_UC_READY: return "UC_READY"; 6681 case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE"; 6682 case IWN_TX_DONE: return "TX_DONE"; 6683 case IWN_START_SCAN: return "START_SCAN"; 6684 case IWN_STOP_SCAN: return "STOP_SCAN"; 6685 case IWN_RX_STATISTICS: return "RX_STATS"; 6686 case IWN_BEACON_STATISTICS: return "BEACON_STATS"; 6687 case IWN_STATE_CHANGED: return "STATE_CHANGED"; 6688 case IWN_BEACON_MISSED: return "BEACON_MISSED"; 6689 case IWN_RX_PHY: return "RX_PHY"; 6690 case IWN_MPDU_RX_DONE: return "MPDU_RX_DONE"; 6691 case IWN_RX_DONE: return "RX_DONE"; 6692 6693 /* Command Notifications */ 6694 case IWN_CMD_RXON: return "IWN_CMD_RXON"; 6695 case IWN_CMD_RXON_ASSOC: return "IWN_CMD_RXON_ASSOC"; 6696 case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS"; 6697 case IWN_CMD_TIMING: return "IWN_CMD_TIMING"; 6698 case IWN_CMD_LINK_QUALITY: return "IWN_CMD_LINK_QUALITY"; 6699 case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED"; 6700 case IWN5000_CMD_WIMAX_COEX: return "IWN5000_CMD_WIMAX_COEX"; 6701 case IWN5000_CMD_CALIB_CONFIG: return "IWN5000_CMD_CALIB_CONFIG"; 6702 case IWN5000_CMD_CALIB_RESULT: return "IWN5000_CMD_CALIB_RESULT"; 6703 case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE"; 6704 case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE"; 6705 case IWN_CMD_SCAN: return "IWN_CMD_SCAN"; 6706 case IWN_CMD_SCAN_RESULTS: return "IWN_CMD_SCAN_RESULTS"; 6707 case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER"; 6708 case IWN_CMD_TXPOWER_DBM: return "IWN_CMD_TXPOWER_DBM"; 6709 case IWN5000_CMD_TX_ANT_CONFIG: return "IWN5000_CMD_TX_ANT_CONFIG"; 6710 case IWN_CMD_BT_COEX: return "IWN_CMD_BT_COEX"; 6711 case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP"; 6712 case IWN_CMD_SET_SENSITIVITY: return "IWN_CMD_SET_SENSITIVITY"; 6713 case IWN_CMD_PHY_CALIB: return "IWN_CMD_PHY_CALIB"; 6714 } 6715 return "UNKNOWN INTR NOTIF/CMD"; 6716 } 6717 #endif /* IWN_DEBUG */ 6718 6719 static device_method_t iwn_methods[] = { 6720 /* Device interface */ 6721 DEVMETHOD(device_probe, iwn_probe), 6722 DEVMETHOD(device_attach, iwn_attach), 6723 DEVMETHOD(device_detach, iwn_detach), 6724 DEVMETHOD(device_shutdown, iwn_shutdown), 6725 DEVMETHOD(device_suspend, iwn_suspend), 6726 DEVMETHOD(device_resume, iwn_resume), 6727 { 0, 0 } 6728 }; 6729 6730 static driver_t iwn_driver = { 6731 "iwn", 6732 iwn_methods, 6733 sizeof (struct iwn_softc) 6734 }; 6735 static devclass_t iwn_devclass; 6736 6737 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0); 6738 MODULE_DEPEND(iwn, pci, 1, 1, 1); 6739 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 6740 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 6741