1 /*- 2 * Copyright (c) 2007-2009 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Copyright (c) 2008 5 * Benjamin Close <benjsc@FreeBSD.org> 6 * Copyright (c) 2008 Sam Leffler, Errno Consulting 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 23 * adapters. 24 */ 25 26 #include <sys/cdefs.h> 27 __FBSDID("$FreeBSD$"); 28 29 #include <sys/param.h> 30 #include <sys/sockio.h> 31 #include <sys/sysctl.h> 32 #include <sys/mbuf.h> 33 #include <sys/kernel.h> 34 #include <sys/socket.h> 35 #include <sys/systm.h> 36 #include <sys/malloc.h> 37 #include <sys/bus.h> 38 #include <sys/rman.h> 39 #include <sys/endian.h> 40 #include <sys/firmware.h> 41 #include <sys/limits.h> 42 #include <sys/module.h> 43 #include <sys/queue.h> 44 #include <sys/taskqueue.h> 45 46 #include <machine/bus.h> 47 #include <machine/resource.h> 48 #include <machine/clock.h> 49 50 #include <dev/pci/pcireg.h> 51 #include <dev/pci/pcivar.h> 52 53 #include <net/bpf.h> 54 #include <net/if.h> 55 #include <net/if_arp.h> 56 #include <net/ethernet.h> 57 #include <net/if_dl.h> 58 #include <net/if_media.h> 59 #include <net/if_types.h> 60 61 #include <netinet/in.h> 62 #include <netinet/in_systm.h> 63 #include <netinet/in_var.h> 64 #include <netinet/if_ether.h> 65 #include <netinet/ip.h> 66 67 #include <net80211/ieee80211_var.h> 68 #include <net80211/ieee80211_radiotap.h> 69 #include <net80211/ieee80211_regdomain.h> 70 #include <net80211/ieee80211_ratectl.h> 71 72 #include <dev/iwn/if_iwnreg.h> 73 #include <dev/iwn/if_iwnvar.h> 74 75 static int iwn_probe(device_t); 76 static int iwn_attach(device_t); 77 static const struct iwn_hal *iwn_hal_attach(struct iwn_softc *); 78 static void iwn_radiotap_attach(struct iwn_softc *); 79 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 80 const char name[IFNAMSIZ], int unit, int opmode, 81 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], 82 const uint8_t mac[IEEE80211_ADDR_LEN]); 83 static void iwn_vap_delete(struct ieee80211vap *); 84 static int iwn_cleanup(device_t); 85 static int iwn_detach(device_t); 86 static int iwn_nic_lock(struct iwn_softc *); 87 static int iwn_eeprom_lock(struct iwn_softc *); 88 static int iwn_init_otprom(struct iwn_softc *); 89 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 90 static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 91 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 92 void **, bus_size_t, bus_size_t, int); 93 static void iwn_dma_contig_free(struct iwn_dma_info *); 94 static int iwn_alloc_sched(struct iwn_softc *); 95 static void iwn_free_sched(struct iwn_softc *); 96 static int iwn_alloc_kw(struct iwn_softc *); 97 static void iwn_free_kw(struct iwn_softc *); 98 static int iwn_alloc_ict(struct 
iwn_softc *); 99 static void iwn_free_ict(struct iwn_softc *); 100 static int iwn_alloc_fwmem(struct iwn_softc *); 101 static void iwn_free_fwmem(struct iwn_softc *); 102 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 103 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 104 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 105 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 106 int); 107 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 108 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 109 static void iwn5000_ict_reset(struct iwn_softc *); 110 static int iwn_read_eeprom(struct iwn_softc *, 111 uint8_t macaddr[IEEE80211_ADDR_LEN]); 112 static void iwn4965_read_eeprom(struct iwn_softc *); 113 static void iwn4965_print_power_group(struct iwn_softc *, int); 114 static void iwn5000_read_eeprom(struct iwn_softc *); 115 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 116 static void iwn_read_eeprom_band(struct iwn_softc *, int); 117 #if 0 /* HT */ 118 static void iwn_read_eeprom_ht40(struct iwn_softc *, int); 119 #endif 120 static void iwn_read_eeprom_channels(struct iwn_softc *, int, 121 uint32_t); 122 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 123 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 124 const uint8_t mac[IEEE80211_ADDR_LEN]); 125 static int iwn_media_change(struct ifnet *); 126 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 127 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 128 struct iwn_rx_data *); 129 static void iwn_timer_timeout(void *); 130 static void iwn_calib_reset(struct iwn_softc *); 131 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 132 struct iwn_rx_data *); 133 #if 0 /* HT */ 134 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 135 struct iwn_rx_data *); 136 #endif 137 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 138 struct iwn_rx_data *); 139 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 140 struct iwn_rx_data *); 141 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 142 struct iwn_rx_data *); 143 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 144 uint8_t); 145 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 146 static void iwn_notif_intr(struct iwn_softc *); 147 static void iwn_wakeup_intr(struct iwn_softc *); 148 static void iwn_rftoggle_intr(struct iwn_softc *); 149 static void iwn_fatal_intr(struct iwn_softc *); 150 static void iwn_intr(void *); 151 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 152 uint16_t); 153 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 154 uint16_t); 155 #ifdef notyet 156 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 157 #endif 158 static uint8_t iwn_plcp_signal(int); 159 static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 160 struct ieee80211_node *, struct iwn_tx_ring *); 161 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 162 const struct ieee80211_bpf_params *); 163 static void iwn_start(struct ifnet *); 164 static void iwn_start_locked(struct ifnet *); 165 static void iwn_watchdog(struct iwn_softc *sc); 166 static int iwn_ioctl(struct ifnet *, u_long, caddr_t); 167 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 168 static int 
iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 169 int); 170 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 171 int); 172 static int iwn_set_link_quality(struct iwn_softc *, uint8_t, int); 173 static int iwn_add_broadcast_node(struct iwn_softc *, int); 174 static int iwn_wme_update(struct ieee80211com *); 175 static void iwn_update_mcast(struct ifnet *); 176 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 177 static int iwn_set_critical_temp(struct iwn_softc *); 178 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 179 static void iwn4965_power_calibration(struct iwn_softc *, int); 180 static int iwn4965_set_txpower(struct iwn_softc *, 181 struct ieee80211_channel *, int); 182 static int iwn5000_set_txpower(struct iwn_softc *, 183 struct ieee80211_channel *, int); 184 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 185 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 186 static int iwn_get_noise(const struct iwn_rx_general_stats *); 187 static int iwn4965_get_temperature(struct iwn_softc *); 188 static int iwn5000_get_temperature(struct iwn_softc *); 189 static int iwn_init_sensitivity(struct iwn_softc *); 190 static void iwn_collect_noise(struct iwn_softc *, 191 const struct iwn_rx_general_stats *); 192 static int iwn4965_init_gains(struct iwn_softc *); 193 static int iwn5000_init_gains(struct iwn_softc *); 194 static int iwn4965_set_gains(struct iwn_softc *); 195 static int iwn5000_set_gains(struct iwn_softc *); 196 static void iwn_tune_sensitivity(struct iwn_softc *, 197 const struct iwn_rx_stats *); 198 static int iwn_send_sensitivity(struct iwn_softc *); 199 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 200 static int iwn_config(struct iwn_softc *); 201 static int iwn_scan(struct iwn_softc *); 202 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 203 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 204 #if 0 /* HT */ 205 static int iwn_ampdu_rx_start(struct ieee80211com *, 206 struct ieee80211_node *, uint8_t); 207 static void iwn_ampdu_rx_stop(struct ieee80211com *, 208 struct ieee80211_node *, uint8_t); 209 static int iwn_ampdu_tx_start(struct ieee80211com *, 210 struct ieee80211_node *, uint8_t); 211 static void iwn_ampdu_tx_stop(struct ieee80211com *, 212 struct ieee80211_node *, uint8_t); 213 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 214 struct ieee80211_node *, uint8_t, uint16_t); 215 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t); 216 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 217 struct ieee80211_node *, uint8_t, uint16_t); 218 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t); 219 #endif 220 static int iwn5000_send_calib_results(struct iwn_softc *); 221 static int iwn5000_save_calib_result(struct iwn_softc *, 222 struct iwn_phy_calib *, int, int); 223 static void iwn5000_free_calib_results(struct iwn_softc *); 224 static int iwn5000_chrystal_calib(struct iwn_softc *); 225 static int iwn5000_send_calib_query(struct iwn_softc *, uint32_t); 226 static int iwn5000_rx_calib_result(struct iwn_softc *, 227 struct iwn_rx_desc *, struct iwn_rx_data *); 228 static int iwn5000_send_wimax_coex(struct iwn_softc *); 229 static int iwn4965_post_alive(struct iwn_softc *); 230 static int iwn5000_post_alive(struct iwn_softc *); 231 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 232 int); 233 static int 
iwn4965_load_firmware(struct iwn_softc *); 234 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 235 const uint8_t *, int); 236 static int iwn5000_load_firmware(struct iwn_softc *); 237 static int iwn_read_firmware_leg(struct iwn_softc *, 238 struct iwn_fw_info *); 239 static int iwn_read_firmware_tlv(struct iwn_softc *, 240 struct iwn_fw_info *, uint16_t); 241 static int iwn_read_firmware(struct iwn_softc *); 242 static int iwn_clock_wait(struct iwn_softc *); 243 static int iwn_apm_init(struct iwn_softc *); 244 static void iwn_apm_stop_master(struct iwn_softc *); 245 static void iwn_apm_stop(struct iwn_softc *); 246 static int iwn4965_nic_config(struct iwn_softc *); 247 static int iwn5000_nic_config(struct iwn_softc *); 248 static int iwn_hw_prepare(struct iwn_softc *); 249 static int iwn_hw_init(struct iwn_softc *); 250 static void iwn_hw_stop(struct iwn_softc *); 251 static void iwn_init_locked(struct iwn_softc *); 252 static void iwn_init(void *); 253 static void iwn_stop_locked(struct iwn_softc *); 254 static void iwn_stop(struct iwn_softc *); 255 static void iwn_scan_start(struct ieee80211com *); 256 static void iwn_scan_end(struct ieee80211com *); 257 static void iwn_set_channel(struct ieee80211com *); 258 static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 259 static void iwn_scan_mindwell(struct ieee80211_scan_state *); 260 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 261 struct ieee80211_channel *); 262 static int iwn_setregdomain(struct ieee80211com *, 263 struct ieee80211_regdomain *, int, 264 struct ieee80211_channel []); 265 static void iwn_hw_reset(void *, int); 266 static void iwn_radio_on(void *, int); 267 static void iwn_radio_off(void *, int); 268 static void iwn_sysctlattach(struct iwn_softc *); 269 static int iwn_shutdown(device_t); 270 static int iwn_suspend(device_t); 271 static int iwn_resume(device_t); 272 273 #define IWN_DEBUG 274 #ifdef IWN_DEBUG 275 enum { 276 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 277 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ 278 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ 279 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ 280 IWN_DEBUG_RESET = 0x00000010, /* reset processing */ 281 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */ 282 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ 283 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ 284 IWN_DEBUG_INTR = 0x00000100, /* ISR */ 285 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ 286 IWN_DEBUG_NODE = 0x00000400, /* node management */ 287 IWN_DEBUG_LED = 0x00000800, /* led management */ 288 IWN_DEBUG_CMD = 0x00001000, /* cmd submission */ 289 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ 290 IWN_DEBUG_ANY = 0xffffffff 291 }; 292 293 #define DPRINTF(sc, m, fmt, ...) do { \ 294 if (sc->sc_debug & (m)) \ 295 printf(fmt, __VA_ARGS__); \ 296 } while (0) 297 298 static const char *iwn_intr_str(uint8_t); 299 #else 300 #define DPRINTF(sc, m, fmt, ...) 
do { (void) sc; } while (0) 301 #endif 302 303 struct iwn_ident { 304 uint16_t vendor; 305 uint16_t device; 306 const char *name; 307 }; 308 309 static const struct iwn_ident iwn_ident_table [] = { 310 { 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" }, 311 { 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" }, 312 { 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" }, 313 { 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" }, 314 { 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" }, 315 { 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" }, 316 { 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" }, 317 { 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" }, 318 { 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" }, 319 { 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" }, 320 { 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" }, 321 { 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" }, 322 { 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" }, 323 { 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" }, 324 { 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" }, 325 { 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" }, 326 { 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" }, 327 { 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" }, 328 { 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" }, 329 { 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" }, 330 { 0x8086, 0x0087, "Intel(R) PRO/Wireless 6250" }, 331 { 0x8086, 0x0089, "Intel(R) PRO/Wireless 6250" }, 332 { 0x8086, 0x0082, "Intel(R) PRO/Wireless 6205a" }, 333 { 0x8086, 0x0085, "Intel(R) PRO/Wireless 6205a" }, 334 #ifdef notyet 335 { 0x8086, 0x008a, "Intel(R) PRO/Wireless 6205b" }, 336 { 0x8086, 0x008b, "Intel(R) PRO/Wireless 6205b" }, 337 { 0x8086, 0x008f, "Intel(R) PRO/Wireless 6205b" }, 338 { 0x8086, 0x0090, "Intel(R) PRO/Wireless 6205b" }, 339 { 0x8086, 0x0091, "Intel(R) PRO/Wireless 6205b" }, 340 #endif 341 { 0, 0, NULL } 342 }; 343 344 static const struct iwn_hal iwn4965_hal = { 345 iwn4965_load_firmware, 346 iwn4965_read_eeprom, 347 iwn4965_post_alive, 348 iwn4965_nic_config, 349 iwn4965_update_sched, 350 iwn4965_get_temperature, 351 iwn4965_get_rssi, 352 iwn4965_set_txpower, 353 iwn4965_init_gains, 354 iwn4965_set_gains, 355 iwn4965_add_node, 356 iwn4965_tx_done, 357 #if 0 /* HT */ 358 iwn4965_ampdu_tx_start, 359 iwn4965_ampdu_tx_stop, 360 #endif 361 IWN4965_NTXQUEUES, 362 IWN4965_NDMACHNLS, 363 IWN4965_ID_BROADCAST, 364 IWN4965_RXONSZ, 365 IWN4965_SCHEDSZ, 366 IWN4965_FW_TEXT_MAXSZ, 367 IWN4965_FW_DATA_MAXSZ, 368 IWN4965_FWSZ, 369 IWN4965_SCHED_TXFACT 370 }; 371 372 static const struct iwn_hal iwn5000_hal = { 373 iwn5000_load_firmware, 374 iwn5000_read_eeprom, 375 iwn5000_post_alive, 376 iwn5000_nic_config, 377 iwn5000_update_sched, 378 iwn5000_get_temperature, 379 iwn5000_get_rssi, 380 iwn5000_set_txpower, 381 iwn5000_init_gains, 382 iwn5000_set_gains, 383 iwn5000_add_node, 384 iwn5000_tx_done, 385 #if 0 /* HT */ 386 iwn5000_ampdu_tx_start, 387 iwn5000_ampdu_tx_stop, 388 #endif 389 IWN5000_NTXQUEUES, 390 IWN5000_NDMACHNLS, 391 IWN5000_ID_BROADCAST, 392 IWN5000_RXONSZ, 393 IWN5000_SCHEDSZ, 394 IWN5000_FW_TEXT_MAXSZ, 395 IWN5000_FW_DATA_MAXSZ, 396 IWN5000_FWSZ, 397 IWN5000_SCHED_TXFACT 398 }; 399 400 static int 401 iwn_probe(device_t dev) 402 { 403 const struct iwn_ident *ident; 404 405 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 406 if (pci_get_vendor(dev) == ident->vendor && 407 pci_get_device(dev) == ident->device) { 408 device_set_desc(dev, ident->name); 409 return 0; 410 } 411 } 412 return ENXIO; 413 } 414 415 static int 416 iwn_attach(device_t dev) 417 { 418 struct iwn_softc *sc = 
(struct iwn_softc *)device_get_softc(dev); 419 struct ieee80211com *ic; 420 struct ifnet *ifp; 421 const struct iwn_hal *hal; 422 uint32_t tmp; 423 int i, error, result; 424 uint8_t macaddr[IEEE80211_ADDR_LEN]; 425 426 sc->sc_dev = dev; 427 428 /* 429 * Get the offset of the PCI Express Capability Structure in PCI 430 * Configuration Space. 431 */ 432 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 433 if (error != 0) { 434 device_printf(dev, "PCIe capability structure not found!\n"); 435 return error; 436 } 437 438 /* Clear device-specific "PCI retry timeout" register (41h). */ 439 pci_write_config(dev, 0x41, 0, 1); 440 441 /* Hardware bug workaround. */ 442 tmp = pci_read_config(dev, PCIR_COMMAND, 1); 443 if (tmp & PCIM_CMD_INTxDIS) { 444 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 445 __func__); 446 tmp &= ~PCIM_CMD_INTxDIS; 447 pci_write_config(dev, PCIR_COMMAND, tmp, 1); 448 } 449 450 /* Enable bus-mastering. */ 451 pci_enable_busmaster(dev); 452 453 sc->mem_rid = PCIR_BAR(0); 454 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 455 RF_ACTIVE); 456 if (sc->mem == NULL ) { 457 device_printf(dev, "could not allocate memory resources\n"); 458 error = ENOMEM; 459 return error; 460 } 461 462 sc->sc_st = rman_get_bustag(sc->mem); 463 sc->sc_sh = rman_get_bushandle(sc->mem); 464 sc->irq_rid = 0; 465 if ((result = pci_msi_count(dev)) == 1 && 466 pci_alloc_msi(dev, &result) == 0) 467 sc->irq_rid = 1; 468 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, 469 RF_ACTIVE | RF_SHAREABLE); 470 if (sc->irq == NULL) { 471 device_printf(dev, "could not allocate interrupt resource\n"); 472 error = ENOMEM; 473 goto fail; 474 } 475 476 IWN_LOCK_INIT(sc); 477 callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0); 478 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc ); 479 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc ); 480 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc ); 481 482 /* Attach Hardware Abstraction Layer. */ 483 hal = iwn_hal_attach(sc); 484 if (hal == NULL) { 485 error = ENXIO; /* XXX: Wrong error code? */ 486 goto fail; 487 } 488 489 error = iwn_hw_prepare(sc); 490 if (error != 0) { 491 device_printf(dev, "hardware not ready, error %d\n", error); 492 goto fail; 493 } 494 495 /* Allocate DMA memory for firmware transfers. */ 496 error = iwn_alloc_fwmem(sc); 497 if (error != 0) { 498 device_printf(dev, 499 "could not allocate memory for firmware, error %d\n", 500 error); 501 goto fail; 502 } 503 504 /* Allocate "Keep Warm" page. */ 505 error = iwn_alloc_kw(sc); 506 if (error != 0) { 507 device_printf(dev, 508 "could not allocate \"Keep Warm\" page, error %d\n", error); 509 goto fail; 510 } 511 512 /* Allocate ICT table for 5000 Series. */ 513 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 514 (error = iwn_alloc_ict(sc)) != 0) { 515 device_printf(dev, 516 "%s: could not allocate ICT table, error %d\n", 517 __func__, error); 518 goto fail; 519 } 520 521 /* Allocate TX scheduler "rings". */ 522 error = iwn_alloc_sched(sc); 523 if (error != 0) { 524 device_printf(dev, 525 "could not allocate TX scheduler rings, error %d\n", 526 error); 527 goto fail; 528 } 529 530 /* Allocate TX rings (16 on 4965AGN, 20 on 5000). */ 531 for (i = 0; i < hal->ntxqs; i++) { 532 error = iwn_alloc_tx_ring(sc, &sc->txq[i], i); 533 if (error != 0) { 534 device_printf(dev, 535 "could not allocate Tx ring %d, error %d\n", 536 i, error); 537 goto fail; 538 } 539 } 540 541 /* Allocate RX ring. 
*/ 542 error = iwn_alloc_rx_ring(sc, &sc->rxq); 543 if (error != 0 ){ 544 device_printf(dev, 545 "could not allocate Rx ring, error %d\n", error); 546 goto fail; 547 } 548 549 /* Clear pending interrupts. */ 550 IWN_WRITE(sc, IWN_INT, 0xffffffff); 551 552 /* Count the number of available chains. */ 553 sc->ntxchains = 554 ((sc->txchainmask >> 2) & 1) + 555 ((sc->txchainmask >> 1) & 1) + 556 ((sc->txchainmask >> 0) & 1); 557 sc->nrxchains = 558 ((sc->rxchainmask >> 2) & 1) + 559 ((sc->rxchainmask >> 1) & 1) + 560 ((sc->rxchainmask >> 0) & 1); 561 562 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 563 if (ifp == NULL) { 564 device_printf(dev, "can not allocate ifnet structure\n"); 565 goto fail; 566 } 567 ic = ifp->if_l2com; 568 569 ic->ic_ifp = ifp; 570 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 571 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 572 573 /* Set device capabilities. */ 574 ic->ic_caps = 575 IEEE80211_C_STA /* station mode supported */ 576 | IEEE80211_C_MONITOR /* monitor mode supported */ 577 | IEEE80211_C_TXPMGT /* tx power management */ 578 | IEEE80211_C_SHSLOT /* short slot time supported */ 579 | IEEE80211_C_WPA 580 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 581 | IEEE80211_C_BGSCAN /* background scanning */ 582 #if 0 583 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 584 #endif 585 | IEEE80211_C_WME /* WME */ 586 ; 587 #if 0 /* HT */ 588 /* XXX disable until HT channel setup works */ 589 ic->ic_htcaps = 590 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */ 591 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */ 592 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 593 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 594 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */ 595 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 596 /* s/w capabilities */ 597 | IEEE80211_HTC_HT /* HT operation */ 598 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 599 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 600 ; 601 602 /* Set HT capabilities. */ 603 ic->ic_htcaps = 604 #if IWN_RBUF_SIZE == 8192 605 IEEE80211_HTCAP_AMSDU7935 | 606 #endif 607 IEEE80211_HTCAP_CBW20_40 | 608 IEEE80211_HTCAP_SGI20 | 609 IEEE80211_HTCAP_SGI40; 610 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 611 ic->ic_htcaps |= IEEE80211_HTCAP_GF; 612 if (sc->hw_type == IWN_HW_REV_TYPE_6050) 613 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN; 614 else 615 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS; 616 #endif 617 618 /* Read MAC address, channels, etc from EEPROM. */ 619 error = iwn_read_eeprom(sc, macaddr); 620 if (error != 0) { 621 device_printf(dev, "could not read EEPROM, error %d\n", 622 error); 623 goto fail; 624 } 625 626 device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n", 627 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 628 macaddr, ":"); 629 630 #if 0 /* HT */ 631 /* Set supported HT rates. 
*/ 632 ic->ic_sup_mcs[0] = 0xff; 633 if (sc->nrxchains > 1) 634 ic->ic_sup_mcs[1] = 0xff; 635 if (sc->nrxchains > 2) 636 ic->ic_sup_mcs[2] = 0xff; 637 #endif 638 639 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 640 ifp->if_softc = sc; 641 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 642 ifp->if_init = iwn_init; 643 ifp->if_ioctl = iwn_ioctl; 644 ifp->if_start = iwn_start; 645 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 646 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 647 IFQ_SET_READY(&ifp->if_snd); 648 649 ieee80211_ifattach(ic, macaddr); 650 ic->ic_vap_create = iwn_vap_create; 651 ic->ic_vap_delete = iwn_vap_delete; 652 ic->ic_raw_xmit = iwn_raw_xmit; 653 ic->ic_node_alloc = iwn_node_alloc; 654 ic->ic_wme.wme_update = iwn_wme_update; 655 ic->ic_update_mcast = iwn_update_mcast; 656 ic->ic_scan_start = iwn_scan_start; 657 ic->ic_scan_end = iwn_scan_end; 658 ic->ic_set_channel = iwn_set_channel; 659 ic->ic_scan_curchan = iwn_scan_curchan; 660 ic->ic_scan_mindwell = iwn_scan_mindwell; 661 ic->ic_setregdomain = iwn_setregdomain; 662 #if 0 /* HT */ 663 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 664 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 665 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start; 666 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop; 667 #endif 668 669 iwn_radiotap_attach(sc); 670 iwn_sysctlattach(sc); 671 672 /* 673 * Hook our interrupt after all initialization is complete. 674 */ 675 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 676 NULL, iwn_intr, sc, &sc->sc_ih); 677 if (error != 0) { 678 device_printf(dev, "could not set up interrupt, error %d\n", 679 error); 680 goto fail; 681 } 682 683 ieee80211_announce(ic); 684 return 0; 685 fail: 686 iwn_cleanup(dev); 687 return error; 688 } 689 690 static const struct iwn_hal * 691 iwn_hal_attach(struct iwn_softc *sc) 692 { 693 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf; 694 695 switch (sc->hw_type) { 696 case IWN_HW_REV_TYPE_4965: 697 sc->sc_hal = &iwn4965_hal; 698 sc->limits = &iwn4965_sensitivity_limits; 699 sc->fwname = "iwn4965fw"; 700 sc->txchainmask = IWN_ANT_AB; 701 sc->rxchainmask = IWN_ANT_ABC; 702 break; 703 case IWN_HW_REV_TYPE_5100: 704 sc->sc_hal = &iwn5000_hal; 705 sc->limits = &iwn5000_sensitivity_limits; 706 sc->fwname = "iwn5000fw"; 707 sc->txchainmask = IWN_ANT_B; 708 sc->rxchainmask = IWN_ANT_AB; 709 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 710 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC | 711 IWN_CALIB_BASE_BAND; 712 break; 713 case IWN_HW_REV_TYPE_5150: 714 sc->sc_hal = &iwn5000_hal; 715 sc->limits = &iwn5150_sensitivity_limits; 716 sc->fwname = "iwn5150fw"; 717 sc->txchainmask = IWN_ANT_A; 718 sc->rxchainmask = IWN_ANT_AB; 719 sc->calib_init = IWN_CALIB_DC | IWN_CALIB_LO | 720 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 721 break; 722 case IWN_HW_REV_TYPE_5300: 723 case IWN_HW_REV_TYPE_5350: 724 sc->sc_hal = &iwn5000_hal; 725 sc->limits = &iwn5000_sensitivity_limits; 726 sc->fwname = "iwn5000fw"; 727 sc->txchainmask = IWN_ANT_ABC; 728 sc->rxchainmask = IWN_ANT_ABC; 729 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 730 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC | 731 IWN_CALIB_BASE_BAND; 732 break; 733 case IWN_HW_REV_TYPE_1000: 734 sc->sc_hal = &iwn5000_hal; 735 sc->limits = &iwn1000_sensitivity_limits; 736 sc->fwname = "iwn1000fw"; 737 sc->txchainmask = IWN_ANT_A; 738 sc->rxchainmask = IWN_ANT_AB; 739 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 740 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC | 741 IWN_CALIB_BASE_BAND; 742 break; 743 case IWN_HW_REV_TYPE_6000: 744 
sc->sc_hal = &iwn5000_hal; 745 sc->limits = &iwn6000_sensitivity_limits; 746 sc->fwname = "iwn6000fw"; 747 switch (pci_get_device(sc->sc_dev)) { 748 case 0x422C: 749 case 0x4239: 750 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 751 sc->txchainmask = IWN_ANT_BC; 752 sc->rxchainmask = IWN_ANT_BC; 753 break; 754 default: 755 sc->txchainmask = IWN_ANT_ABC; 756 sc->rxchainmask = IWN_ANT_ABC; 757 sc->calib_runtime = IWN_CALIB_DC; 758 break; 759 } 760 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 761 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 762 break; 763 case IWN_HW_REV_TYPE_6050: 764 sc->sc_hal = &iwn5000_hal; 765 sc->limits = &iwn6000_sensitivity_limits; 766 sc->fwname = "iwn6050fw"; 767 sc->txchainmask = IWN_ANT_AB; 768 sc->rxchainmask = IWN_ANT_AB; 769 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 770 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 771 sc->calib_runtime = IWN_CALIB_DC; 772 break; 773 case IWN_HW_REV_TYPE_6005: 774 sc->sc_hal = &iwn5000_hal; 775 sc->limits = &iwn6000_sensitivity_limits; 776 sc->fwname = "iwn6005fw"; 777 sc->txchainmask = IWN_ANT_AB; 778 sc->rxchainmask = IWN_ANT_AB; 779 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO | 780 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND; 781 sc->calib_runtime = IWN_CALIB_DC; 782 break; 783 default: 784 device_printf(sc->sc_dev, "adapter type %d not supported\n", 785 sc->hw_type); 786 return NULL; 787 } 788 return sc->sc_hal; 789 } 790 791 /* 792 * Attach the interface to 802.11 radiotap. 793 */ 794 static void 795 iwn_radiotap_attach(struct iwn_softc *sc) 796 { 797 struct ifnet *ifp = sc->sc_ifp; 798 struct ieee80211com *ic = ifp->if_l2com; 799 800 ieee80211_radiotap_attach(ic, 801 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 802 IWN_TX_RADIOTAP_PRESENT, 803 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 804 IWN_RX_RADIOTAP_PRESENT); 805 } 806 807 static struct ieee80211vap * 808 iwn_vap_create(struct ieee80211com *ic, 809 const char name[IFNAMSIZ], int unit, int opmode, int flags, 810 const uint8_t bssid[IEEE80211_ADDR_LEN], 811 const uint8_t mac[IEEE80211_ADDR_LEN]) 812 { 813 struct iwn_vap *ivp; 814 struct ieee80211vap *vap; 815 816 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 817 return NULL; 818 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), 819 M_80211_VAP, M_NOWAIT | M_ZERO); 820 if (ivp == NULL) 821 return NULL; 822 vap = &ivp->iv_vap; 823 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); 824 vap->iv_bmissthreshold = 10; /* override default */ 825 /* Override with driver methods. */ 826 ivp->iv_newstate = vap->iv_newstate; 827 vap->iv_newstate = iwn_newstate; 828 829 ieee80211_ratectl_init(vap); 830 /* Complete setup. */ 831 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); 832 ic->ic_opmode = opmode; 833 return vap; 834 } 835 836 static void 837 iwn_vap_delete(struct ieee80211vap *vap) 838 { 839 struct iwn_vap *ivp = IWN_VAP(vap); 840 841 ieee80211_ratectl_deinit(vap); 842 ieee80211_vap_detach(vap); 843 free(ivp, M_80211_VAP); 844 } 845 846 static int 847 iwn_cleanup(device_t dev) 848 { 849 struct iwn_softc *sc = device_get_softc(dev); 850 struct ifnet *ifp = sc->sc_ifp; 851 struct ieee80211com *ic; 852 int i; 853 854 if (ifp != NULL) { 855 ic = ifp->if_l2com; 856 857 ieee80211_draintask(ic, &sc->sc_reinit_task); 858 ieee80211_draintask(ic, &sc->sc_radioon_task); 859 ieee80211_draintask(ic, &sc->sc_radiooff_task); 860 861 iwn_stop(sc); 862 callout_drain(&sc->sc_timer_to); 863 ieee80211_ifdetach(ic); 864 } 865 866 iwn5000_free_calib_results(sc); 867 868 /* Free DMA resources. 
*/ 869 iwn_free_rx_ring(sc, &sc->rxq); 870 if (sc->sc_hal != NULL) 871 for (i = 0; i < sc->sc_hal->ntxqs; i++) 872 iwn_free_tx_ring(sc, &sc->txq[i]); 873 iwn_free_sched(sc); 874 iwn_free_kw(sc); 875 if (sc->ict != NULL) 876 iwn_free_ict(sc); 877 iwn_free_fwmem(sc); 878 879 if (sc->irq != NULL) { 880 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 881 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); 882 if (sc->irq_rid == 1) 883 pci_release_msi(dev); 884 } 885 886 if (sc->mem != NULL) 887 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); 888 889 if (ifp != NULL) 890 if_free(ifp); 891 892 IWN_LOCK_DESTROY(sc); 893 return 0; 894 } 895 896 static int 897 iwn_detach(device_t dev) 898 { 899 iwn_cleanup(dev); 900 return 0; 901 } 902 903 static int 904 iwn_nic_lock(struct iwn_softc *sc) 905 { 906 int ntries; 907 908 /* Request exclusive access to NIC. */ 909 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 910 911 /* Spin until we actually get the lock. */ 912 for (ntries = 0; ntries < 1000; ntries++) { 913 if ((IWN_READ(sc, IWN_GP_CNTRL) & 914 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 915 IWN_GP_CNTRL_MAC_ACCESS_ENA) 916 return 0; 917 DELAY(10); 918 } 919 return ETIMEDOUT; 920 } 921 922 static __inline void 923 iwn_nic_unlock(struct iwn_softc *sc) 924 { 925 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 926 } 927 928 static __inline uint32_t 929 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 930 { 931 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 932 IWN_BARRIER_READ_WRITE(sc); 933 return IWN_READ(sc, IWN_PRPH_RDATA); 934 } 935 936 static __inline void 937 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 938 { 939 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 940 IWN_BARRIER_WRITE(sc); 941 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 942 } 943 944 static __inline void 945 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 946 { 947 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 948 } 949 950 static __inline void 951 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 952 { 953 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 954 } 955 956 static __inline void 957 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 958 const uint32_t *data, int count) 959 { 960 for (; count > 0; count--, data++, addr += 4) 961 iwn_prph_write(sc, addr, *data); 962 } 963 964 static __inline uint32_t 965 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 966 { 967 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 968 IWN_BARRIER_READ_WRITE(sc); 969 return IWN_READ(sc, IWN_MEM_RDATA); 970 } 971 972 static __inline void 973 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 974 { 975 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 976 IWN_BARRIER_WRITE(sc); 977 IWN_WRITE(sc, IWN_MEM_WDATA, data); 978 } 979 980 static __inline void 981 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) 982 { 983 uint32_t tmp; 984 985 tmp = iwn_mem_read(sc, addr & ~3); 986 if (addr & 3) 987 tmp = (tmp & 0x0000ffff) | data << 16; 988 else 989 tmp = (tmp & 0xffff0000) | data; 990 iwn_mem_write(sc, addr & ~3, tmp); 991 } 992 993 static __inline void 994 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 995 int count) 996 { 997 for (; count > 0; count--, addr += 4) 998 *data++ = iwn_mem_read(sc, addr); 999 } 1000 1001 static __inline void 1002 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1003 int count) 1004 { 1005 for (; count > 0; 
count--, addr += 4) 1006 iwn_mem_write(sc, addr, val); 1007 } 1008 1009 static int 1010 iwn_eeprom_lock(struct iwn_softc *sc) 1011 { 1012 int i, ntries; 1013 1014 for (i = 0; i < 100; i++) { 1015 /* Request exclusive access to EEPROM. */ 1016 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1017 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1018 1019 /* Spin until we actually get the lock. */ 1020 for (ntries = 0; ntries < 100; ntries++) { 1021 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1022 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1023 return 0; 1024 DELAY(10); 1025 } 1026 } 1027 return ETIMEDOUT; 1028 } 1029 1030 static __inline void 1031 iwn_eeprom_unlock(struct iwn_softc *sc) 1032 { 1033 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1034 } 1035 1036 /* 1037 * Initialize access by host to One Time Programmable ROM. 1038 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1039 */ 1040 static int 1041 iwn_init_otprom(struct iwn_softc *sc) 1042 { 1043 uint16_t prev, base, next; 1044 int count, error; 1045 1046 /* Wait for clock stabilization before accessing prph. */ 1047 error = iwn_clock_wait(sc); 1048 if (error != 0) 1049 return error; 1050 1051 error = iwn_nic_lock(sc); 1052 if (error != 0) 1053 return error; 1054 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1055 DELAY(5); 1056 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1057 iwn_nic_unlock(sc); 1058 1059 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1060 if (sc->hw_type != IWN_HW_REV_TYPE_1000) { 1061 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1062 IWN_RESET_LINK_PWR_MGMT_DIS); 1063 } 1064 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1065 /* Clear ECC status. */ 1066 IWN_SETBITS(sc, IWN_OTP_GP, 1067 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1068 1069 /* 1070 * Find the block before last block (contains the EEPROM image) 1071 * for HW without OTP shadow RAM. 1072 */ 1073 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 1074 /* Switch to absolute addressing mode. */ 1075 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1076 base = prev = 0; 1077 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) { 1078 error = iwn_read_prom_data(sc, base, &next, 2); 1079 if (error != 0) 1080 return error; 1081 if (next == 0) /* End of linked-list. */ 1082 break; 1083 prev = base; 1084 base = le16toh(next); 1085 } 1086 if (count == 0 || count == IWN1000_OTP_NBLOCKS) 1087 return EIO; 1088 /* Skip "next" word. */ 1089 sc->prom_base = prev + 1; 1090 } 1091 return 0; 1092 } 1093 1094 static int 1095 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1096 { 1097 uint32_t val, tmp; 1098 int ntries; 1099 uint8_t *out = data; 1100 1101 addr += sc->prom_base; 1102 for (; count > 0; count -= 2, addr++) { 1103 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1104 for (ntries = 0; ntries < 10; ntries++) { 1105 val = IWN_READ(sc, IWN_EEPROM); 1106 if (val & IWN_EEPROM_READ_VALID) 1107 break; 1108 DELAY(5); 1109 } 1110 if (ntries == 10) { 1111 device_printf(sc->sc_dev, 1112 "timeout reading ROM at 0x%x\n", addr); 1113 return ETIMEDOUT; 1114 } 1115 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1116 /* OTPROM, check for ECC errors. */ 1117 tmp = IWN_READ(sc, IWN_OTP_GP); 1118 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1119 device_printf(sc->sc_dev, 1120 "OTPROM ECC error at 0x%x\n", addr); 1121 return EIO; 1122 } 1123 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1124 /* Correctable ECC error, clear bit. 
*/ 1125 IWN_SETBITS(sc, IWN_OTP_GP, 1126 IWN_OTP_GP_ECC_CORR_STTS); 1127 } 1128 } 1129 *out++ = val >> 16; 1130 if (count > 1) 1131 *out++ = val >> 24; 1132 } 1133 return 0; 1134 } 1135 1136 static void 1137 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1138 { 1139 if (error != 0) 1140 return; 1141 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1142 *(bus_addr_t *)arg = segs[0].ds_addr; 1143 } 1144 1145 static int 1146 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1147 void **kvap, bus_size_t size, bus_size_t alignment, int flags) 1148 { 1149 int error; 1150 1151 dma->size = size; 1152 dma->tag = NULL; 1153 1154 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1155 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1156 1, size, flags, NULL, NULL, &dma->tag); 1157 if (error != 0) { 1158 device_printf(sc->sc_dev, 1159 "%s: bus_dma_tag_create failed, error %d\n", 1160 __func__, error); 1161 goto fail; 1162 } 1163 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1164 flags | BUS_DMA_ZERO, &dma->map); 1165 if (error != 0) { 1166 device_printf(sc->sc_dev, 1167 "%s: bus_dmamem_alloc failed, error %d\n", __func__, error); 1168 goto fail; 1169 } 1170 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, 1171 size, iwn_dma_map_addr, &dma->paddr, flags); 1172 if (error != 0) { 1173 device_printf(sc->sc_dev, 1174 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 1175 goto fail; 1176 } 1177 1178 if (kvap != NULL) 1179 *kvap = dma->vaddr; 1180 return 0; 1181 fail: 1182 iwn_dma_contig_free(dma); 1183 return error; 1184 } 1185 1186 static void 1187 iwn_dma_contig_free(struct iwn_dma_info *dma) 1188 { 1189 if (dma->tag != NULL) { 1190 if (dma->map != NULL) { 1191 if (dma->paddr == 0) { 1192 bus_dmamap_sync(dma->tag, dma->map, 1193 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1194 bus_dmamap_unload(dma->tag, dma->map); 1195 } 1196 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map); 1197 } 1198 bus_dma_tag_destroy(dma->tag); 1199 } 1200 } 1201 1202 static int 1203 iwn_alloc_sched(struct iwn_softc *sc) 1204 { 1205 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1206 return iwn_dma_contig_alloc(sc, &sc->sched_dma, 1207 (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT); 1208 } 1209 1210 static void 1211 iwn_free_sched(struct iwn_softc *sc) 1212 { 1213 iwn_dma_contig_free(&sc->sched_dma); 1214 } 1215 1216 static int 1217 iwn_alloc_kw(struct iwn_softc *sc) 1218 { 1219 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1220 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096, 1221 BUS_DMA_NOWAIT); 1222 } 1223 1224 static void 1225 iwn_free_kw(struct iwn_softc *sc) 1226 { 1227 iwn_dma_contig_free(&sc->kw_dma); 1228 } 1229 1230 static int 1231 iwn_alloc_ict(struct iwn_softc *sc) 1232 { 1233 /* ICT table must be aligned on a 4KB boundary. */ 1234 return iwn_dma_contig_alloc(sc, &sc->ict_dma, 1235 (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT); 1236 } 1237 1238 static void 1239 iwn_free_ict(struct iwn_softc *sc) 1240 { 1241 iwn_dma_contig_free(&sc->ict_dma); 1242 } 1243 1244 static int 1245 iwn_alloc_fwmem(struct iwn_softc *sc) 1246 { 1247 /* Must be aligned on a 16-byte boundary. 
*/ 1248 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, 1249 sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT); 1250 } 1251 1252 static void 1253 iwn_free_fwmem(struct iwn_softc *sc) 1254 { 1255 iwn_dma_contig_free(&sc->fw_dma); 1256 } 1257 1258 static int 1259 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1260 { 1261 bus_size_t size; 1262 int i, error; 1263 1264 ring->cur = 0; 1265 1266 /* Allocate RX descriptors (256-byte aligned). */ 1267 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1268 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, 1269 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT); 1270 if (error != 0) { 1271 device_printf(sc->sc_dev, 1272 "%s: could not allocate Rx ring DMA memory, error %d\n", 1273 __func__, error); 1274 goto fail; 1275 } 1276 1277 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1278 BUS_SPACE_MAXADDR_32BIT, 1279 BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, 1280 MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); 1281 if (error != 0) { 1282 device_printf(sc->sc_dev, 1283 "%s: bus_dma_tag_create_failed, error %d\n", 1284 __func__, error); 1285 goto fail; 1286 } 1287 1288 /* Allocate RX status area (16-byte aligned). */ 1289 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, 1290 (void **)&ring->stat, sizeof (struct iwn_rx_status), 1291 16, BUS_DMA_NOWAIT); 1292 if (error != 0) { 1293 device_printf(sc->sc_dev, 1294 "%s: could not allocate Rx status DMA memory, error %d\n", 1295 __func__, error); 1296 goto fail; 1297 } 1298 1299 /* 1300 * Allocate and map RX buffers. 1301 */ 1302 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1303 struct iwn_rx_data *data = &ring->data[i]; 1304 bus_addr_t paddr; 1305 1306 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1307 if (error != 0) { 1308 device_printf(sc->sc_dev, 1309 "%s: bus_dmamap_create failed, error %d\n", 1310 __func__, error); 1311 goto fail; 1312 } 1313 1314 data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 1315 if (data->m == NULL) { 1316 device_printf(sc->sc_dev, 1317 "%s: could not allocate rx mbuf\n", __func__); 1318 error = ENOMEM; 1319 goto fail; 1320 } 1321 1322 /* Map page. */ 1323 error = bus_dmamap_load(ring->data_dmat, data->map, 1324 mtod(data->m, caddr_t), MJUMPAGESIZE, 1325 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 1326 if (error != 0 && error != EFBIG) { 1327 device_printf(sc->sc_dev, 1328 "%s: bus_dmamap_load failed, error %d\n", 1329 __func__, error); 1330 m_freem(data->m); 1331 error = ENOMEM; /* XXX unique code */ 1332 goto fail; 1333 } 1334 bus_dmamap_sync(ring->data_dmat, data->map, 1335 BUS_DMASYNC_PREWRITE); 1336 1337 /* Set physical address of RX buffer (256-byte aligned). 
*/ 1338 ring->desc[i] = htole32(paddr >> 8); 1339 } 1340 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1341 BUS_DMASYNC_PREWRITE); 1342 return 0; 1343 fail: 1344 iwn_free_rx_ring(sc, ring); 1345 return error; 1346 } 1347 1348 static void 1349 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1350 { 1351 int ntries; 1352 1353 if (iwn_nic_lock(sc) == 0) { 1354 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1355 for (ntries = 0; ntries < 1000; ntries++) { 1356 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1357 IWN_FH_RX_STATUS_IDLE) 1358 break; 1359 DELAY(10); 1360 } 1361 iwn_nic_unlock(sc); 1362 #ifdef IWN_DEBUG 1363 if (ntries == 1000) 1364 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 1365 "timeout resetting Rx ring"); 1366 #endif 1367 } 1368 ring->cur = 0; 1369 sc->last_rx_valid = 0; 1370 } 1371 1372 static void 1373 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1374 { 1375 int i; 1376 1377 iwn_dma_contig_free(&ring->desc_dma); 1378 iwn_dma_contig_free(&ring->stat_dma); 1379 1380 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1381 struct iwn_rx_data *data = &ring->data[i]; 1382 1383 if (data->m != NULL) { 1384 bus_dmamap_sync(ring->data_dmat, data->map, 1385 BUS_DMASYNC_POSTREAD); 1386 bus_dmamap_unload(ring->data_dmat, data->map); 1387 m_freem(data->m); 1388 } 1389 if (data->map != NULL) 1390 bus_dmamap_destroy(ring->data_dmat, data->map); 1391 } 1392 } 1393 1394 static int 1395 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1396 { 1397 bus_size_t size; 1398 bus_addr_t paddr; 1399 int i, error; 1400 1401 ring->qid = qid; 1402 ring->queued = 0; 1403 ring->cur = 0; 1404 1405 /* Allocate TX descriptors (256-byte aligned.) */ 1406 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc); 1407 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, 1408 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT); 1409 if (error != 0) { 1410 device_printf(sc->sc_dev, 1411 "%s: could not allocate TX ring DMA memory, error %d\n", 1412 __func__, error); 1413 goto fail; 1414 } 1415 1416 /* 1417 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1418 * to allocate commands space for other rings. 
1419 */ 1420 if (qid > 4) 1421 return 0; 1422 1423 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd); 1424 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, 1425 (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT); 1426 if (error != 0) { 1427 device_printf(sc->sc_dev, 1428 "%s: could not allocate TX cmd DMA memory, error %d\n", 1429 __func__, error); 1430 goto fail; 1431 } 1432 1433 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1434 BUS_SPACE_MAXADDR_32BIT, 1435 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1, 1436 MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); 1437 if (error != 0) { 1438 device_printf(sc->sc_dev, 1439 "%s: bus_dma_tag_create_failed, error %d\n", 1440 __func__, error); 1441 goto fail; 1442 } 1443 1444 paddr = ring->cmd_dma.paddr; 1445 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1446 struct iwn_tx_data *data = &ring->data[i]; 1447 1448 data->cmd_paddr = paddr; 1449 data->scratch_paddr = paddr + 12; 1450 paddr += sizeof (struct iwn_tx_cmd); 1451 1452 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1453 if (error != 0) { 1454 device_printf(sc->sc_dev, 1455 "%s: bus_dmamap_create failed, error %d\n", 1456 __func__, error); 1457 goto fail; 1458 } 1459 bus_dmamap_sync(ring->data_dmat, data->map, 1460 BUS_DMASYNC_PREWRITE); 1461 } 1462 return 0; 1463 fail: 1464 iwn_free_tx_ring(sc, ring); 1465 return error; 1466 } 1467 1468 static void 1469 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1470 { 1471 int i; 1472 1473 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1474 struct iwn_tx_data *data = &ring->data[i]; 1475 1476 if (data->m != NULL) { 1477 bus_dmamap_unload(ring->data_dmat, data->map); 1478 m_freem(data->m); 1479 data->m = NULL; 1480 } 1481 } 1482 /* Clear TX descriptors. */ 1483 memset(ring->desc, 0, ring->desc_dma.size); 1484 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1485 BUS_DMASYNC_PREWRITE); 1486 sc->qfullmsk &= ~(1 << ring->qid); 1487 ring->queued = 0; 1488 ring->cur = 0; 1489 } 1490 1491 static void 1492 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1493 { 1494 int i; 1495 1496 iwn_dma_contig_free(&ring->desc_dma); 1497 iwn_dma_contig_free(&ring->cmd_dma); 1498 1499 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1500 struct iwn_tx_data *data = &ring->data[i]; 1501 1502 if (data->m != NULL) { 1503 bus_dmamap_sync(ring->data_dmat, data->map, 1504 BUS_DMASYNC_POSTWRITE); 1505 bus_dmamap_unload(ring->data_dmat, data->map); 1506 m_freem(data->m); 1507 } 1508 if (data->map != NULL) 1509 bus_dmamap_destroy(ring->data_dmat, data->map); 1510 } 1511 } 1512 1513 static void 1514 iwn5000_ict_reset(struct iwn_softc *sc) 1515 { 1516 /* Disable interrupts. */ 1517 IWN_WRITE(sc, IWN_INT_MASK, 0); 1518 1519 /* Reset ICT table. */ 1520 memset(sc->ict, 0, IWN_ICT_SIZE); 1521 sc->ict_cur = 0; 1522 1523 /* Set physical address of ICT table (4KB aligned.) */ 1524 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); 1525 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 1526 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 1527 1528 /* Enable periodic RX interrupt. */ 1529 sc->int_mask |= IWN_INT_RX_PERIODIC; 1530 /* Switch to ICT interrupt mode in driver. */ 1531 sc->sc_flags |= IWN_FLAG_USE_ICT; 1532 1533 /* Re-enable interrupts. 
*/ 1534 IWN_WRITE(sc, IWN_INT, 0xffffffff); 1535 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 1536 } 1537 1538 static int 1539 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1540 { 1541 const struct iwn_hal *hal = sc->sc_hal; 1542 int error; 1543 uint16_t val; 1544 1545 /* Check whether adapter has an EEPROM or an OTPROM. */ 1546 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1547 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1548 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1549 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", 1550 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); 1551 1552 /* Adapter has to be powered on for EEPROM access to work. */ 1553 error = iwn_apm_init(sc); 1554 if (error != 0) { 1555 device_printf(sc->sc_dev, 1556 "%s: could not power ON adapter, error %d\n", 1557 __func__, error); 1558 return error; 1559 } 1560 1561 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1562 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); 1563 return EIO; 1564 } 1565 error = iwn_eeprom_lock(sc); 1566 if (error != 0) { 1567 device_printf(sc->sc_dev, 1568 "%s: could not lock ROM, error %d\n", 1569 __func__, error); 1570 return error; 1571 } 1572 1573 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1574 error = iwn_init_otprom(sc); 1575 if (error != 0) { 1576 device_printf(sc->sc_dev, 1577 "%s: could not initialize OTPROM, error %d\n", 1578 __func__, error); 1579 return error; 1580 } 1581 } 1582 1583 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1584 sc->rfcfg = le16toh(val); 1585 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); 1586 1587 /* Read MAC address. */ 1588 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); 1589 1590 /* Read adapter-specific information from EEPROM. */ 1591 hal->read_eeprom(sc); 1592 1593 iwn_apm_stop(sc); /* Power OFF adapter. */ 1594 1595 iwn_eeprom_unlock(sc); 1596 return 0; 1597 } 1598 1599 static void 1600 iwn4965_read_eeprom(struct iwn_softc *sc) 1601 { 1602 uint32_t addr; 1603 int i; 1604 uint16_t val; 1605 1606 /* Read regulatory domain (4 ASCII characters.) */ 1607 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1608 1609 /* Read the list of authorized channels (20MHz ones only.) */ 1610 for (i = 0; i < 5; i++) { 1611 addr = iwn4965_regulatory_bands[i]; 1612 iwn_read_eeprom_channels(sc, i, addr); 1613 } 1614 1615 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1616 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1617 sc->maxpwr2GHz = val & 0xff; 1618 sc->maxpwr5GHz = val >> 8; 1619 /* Check that EEPROM values are within valid range. */ 1620 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1621 sc->maxpwr5GHz = 38; 1622 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1623 sc->maxpwr2GHz = 38; 1624 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", 1625 sc->maxpwr2GHz, sc->maxpwr5GHz); 1626 1627 /* Read samples for each TX power group. */ 1628 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1629 sizeof sc->bands); 1630 1631 /* Read voltage at which samples were taken. */ 1632 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1633 sc->eeprom_voltage = (int16_t)le16toh(val); 1634 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", 1635 sc->eeprom_voltage); 1636 1637 #ifdef IWN_DEBUG 1638 /* Print samples. 
*/ 1639 if (sc->sc_debug & IWN_DEBUG_ANY) { 1640 for (i = 0; i < IWN_NBANDS; i++) 1641 iwn4965_print_power_group(sc, i); 1642 } 1643 #endif 1644 } 1645 1646 #ifdef IWN_DEBUG 1647 static void 1648 iwn4965_print_power_group(struct iwn_softc *sc, int i) 1649 { 1650 struct iwn4965_eeprom_band *band = &sc->bands[i]; 1651 struct iwn4965_eeprom_chan_samples *chans = band->chans; 1652 int j, c; 1653 1654 printf("===band %d===\n", i); 1655 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 1656 printf("chan1 num=%d\n", chans[0].num); 1657 for (c = 0; c < 2; c++) { 1658 for (j = 0; j < IWN_NSAMPLES; j++) { 1659 printf("chain %d, sample %d: temp=%d gain=%d " 1660 "power=%d pa_det=%d\n", c, j, 1661 chans[0].samples[c][j].temp, 1662 chans[0].samples[c][j].gain, 1663 chans[0].samples[c][j].power, 1664 chans[0].samples[c][j].pa_det); 1665 } 1666 } 1667 printf("chan2 num=%d\n", chans[1].num); 1668 for (c = 0; c < 2; c++) { 1669 for (j = 0; j < IWN_NSAMPLES; j++) { 1670 printf("chain %d, sample %d: temp=%d gain=%d " 1671 "power=%d pa_det=%d\n", c, j, 1672 chans[1].samples[c][j].temp, 1673 chans[1].samples[c][j].gain, 1674 chans[1].samples[c][j].power, 1675 chans[1].samples[c][j].pa_det); 1676 } 1677 } 1678 } 1679 #endif 1680 1681 static void 1682 iwn5000_read_eeprom(struct iwn_softc *sc) 1683 { 1684 struct iwn5000_eeprom_calib_hdr hdr; 1685 int32_t temp, volt; 1686 uint32_t addr, base; 1687 int i; 1688 uint16_t val; 1689 1690 /* Read regulatory domain (4 ASCII characters.) */ 1691 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1692 base = le16toh(val); 1693 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 1694 sc->eeprom_domain, 4); 1695 1696 /* Read the list of authorized channels (20MHz ones only.) */ 1697 for (i = 0; i < 5; i++) { 1698 addr = base + iwn5000_regulatory_bands[i]; 1699 iwn_read_eeprom_channels(sc, i, addr); 1700 } 1701 1702 /* Read enhanced TX power information for 6000 Series. */ 1703 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1704 iwn_read_eeprom_enhinfo(sc); 1705 1706 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 1707 base = le16toh(val); 1708 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 1709 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 1710 "%s: calib version=%u pa type=%u voltage=%u\n", 1711 __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt)); 1712 sc->calib_ver = hdr.version; 1713 1714 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 1715 /* Compute temperature offset. */ 1716 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1717 temp = le16toh(val); 1718 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 1719 volt = le16toh(val); 1720 sc->temp_off = temp - (volt / -5); 1721 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", 1722 temp, volt, sc->temp_off); 1723 } 1724 } 1725 1726 /* 1727 * Translate EEPROM flags to net80211. 
1728 */ 1729 static uint32_t 1730 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) 1731 { 1732 uint32_t nflags; 1733 1734 nflags = 0; 1735 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 1736 nflags |= IEEE80211_CHAN_PASSIVE; 1737 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) 1738 nflags |= IEEE80211_CHAN_NOADHOC; 1739 if (channel->flags & IWN_EEPROM_CHAN_RADAR) { 1740 nflags |= IEEE80211_CHAN_DFS; 1741 /* XXX apparently IBSS may still be marked */ 1742 nflags |= IEEE80211_CHAN_NOADHOC; 1743 } 1744 1745 return nflags; 1746 } 1747 1748 static void 1749 iwn_read_eeprom_band(struct iwn_softc *sc, int n) 1750 { 1751 struct ifnet *ifp = sc->sc_ifp; 1752 struct ieee80211com *ic = ifp->if_l2com; 1753 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 1754 const struct iwn_chan_band *band = &iwn_bands[n]; 1755 struct ieee80211_channel *c; 1756 int i, chan, nflags; 1757 1758 for (i = 0; i < band->nchan; i++) { 1759 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 1760 DPRINTF(sc, IWN_DEBUG_RESET, 1761 "skip chan %d flags 0x%x maxpwr %d\n", 1762 band->chan[i], channels[i].flags, 1763 channels[i].maxpwr); 1764 continue; 1765 } 1766 chan = band->chan[i]; 1767 nflags = iwn_eeprom_channel_flags(&channels[i]); 1768 1769 DPRINTF(sc, IWN_DEBUG_RESET, 1770 "add chan %d flags 0x%x maxpwr %d\n", 1771 chan, channels[i].flags, channels[i].maxpwr); 1772 1773 c = &ic->ic_channels[ic->ic_nchans++]; 1774 c->ic_ieee = chan; 1775 c->ic_maxregpower = channels[i].maxpwr; 1776 c->ic_maxpower = 2*c->ic_maxregpower; 1777 1778 /* Save maximum allowed TX power for this channel. */ 1779 sc->maxpwr[chan] = channels[i].maxpwr; 1780 1781 if (n == 0) { /* 2GHz band */ 1782 c->ic_freq = ieee80211_ieee2mhz(chan, 1783 IEEE80211_CHAN_G); 1784 1785 /* G =>'s B is supported */ 1786 c->ic_flags = IEEE80211_CHAN_B | nflags; 1787 1788 c = &ic->ic_channels[ic->ic_nchans++]; 1789 c[0] = c[-1]; 1790 c->ic_flags = IEEE80211_CHAN_G | nflags; 1791 } else { /* 5GHz band */ 1792 c->ic_freq = ieee80211_ieee2mhz(chan, 1793 IEEE80211_CHAN_A); 1794 c->ic_flags = IEEE80211_CHAN_A | nflags; 1795 sc->sc_flags |= IWN_FLAG_HAS_5GHZ; 1796 } 1797 #if 0 /* HT */ 1798 /* XXX no constraints on using HT20 */ 1799 /* add HT20, HT40 added separately */ 1800 c = &ic->ic_channels[ic->ic_nchans++]; 1801 c[0] = c[-1]; 1802 c->ic_flags |= IEEE80211_CHAN_HT20; 1803 /* XXX NARROW =>'s 1/2 and 1/4 width? */ 1804 #endif 1805 } 1806 } 1807 1808 #if 0 /* HT */ 1809 static void 1810 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n) 1811 { 1812 struct ifnet *ifp = sc->sc_ifp; 1813 struct ieee80211com *ic = ifp->if_l2com; 1814 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 1815 const struct iwn_chan_band *band = &iwn_bands[n]; 1816 struct ieee80211_channel *c, *cent, *extc; 1817 int i; 1818 1819 for (i = 0; i < band->nchan; i++) { 1820 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) || 1821 !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) { 1822 DPRINTF(sc, IWN_DEBUG_RESET, 1823 "skip chan %d flags 0x%x maxpwr %d\n", 1824 band->chan[i], channels[i].flags, 1825 channels[i].maxpwr); 1826 continue; 1827 } 1828 /* 1829 * Each entry defines an HT40 channel pair; find the 1830 * center channel, then the extension channel above. 
1831 */ 1832 cent = ieee80211_find_channel_byieee(ic, band->chan[i], 1833 band->flags & ~IEEE80211_CHAN_HT); 1834 if (cent == NULL) { /* XXX shouldn't happen */ 1835 device_printf(sc->sc_dev, 1836 "%s: no entry for channel %d\n", 1837 __func__, band->chan[i]); 1838 continue; 1839 } 1840 extc = ieee80211_find_channel(ic, cent->ic_freq+20, 1841 band->flags & ~IEEE80211_CHAN_HT); 1842 if (extc == NULL) { 1843 DPRINTF(sc, IWN_DEBUG_RESET, 1844 "skip chan %d, extension channel not found\n", 1845 band->chan[i]); 1846 continue; 1847 } 1848 1849 DPRINTF(sc, IWN_DEBUG_RESET, 1850 "add ht40 chan %d flags 0x%x maxpwr %d\n", 1851 band->chan[i], channels[i].flags, channels[i].maxpwr); 1852 1853 c = &ic->ic_channels[ic->ic_nchans++]; 1854 c[0] = cent[0]; 1855 c->ic_extieee = extc->ic_ieee; 1856 c->ic_flags &= ~IEEE80211_CHAN_HT; 1857 c->ic_flags |= IEEE80211_CHAN_HT40U; 1858 c = &ic->ic_channels[ic->ic_nchans++]; 1859 c[0] = extc[0]; 1860 c->ic_extieee = cent->ic_ieee; 1861 c->ic_flags &= ~IEEE80211_CHAN_HT; 1862 c->ic_flags |= IEEE80211_CHAN_HT40D; 1863 } 1864 } 1865 #endif 1866 1867 static void 1868 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 1869 { 1870 struct ifnet *ifp = sc->sc_ifp; 1871 struct ieee80211com *ic = ifp->if_l2com; 1872 1873 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], 1874 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); 1875 1876 if (n < 5) 1877 iwn_read_eeprom_band(sc, n); 1878 #if 0 /* HT */ 1879 else 1880 iwn_read_eeprom_ht40(sc, n); 1881 #endif 1882 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1883 } 1884 1885 #define nitems(_a) (sizeof((_a)) / sizeof((_a)[0])) 1886 1887 static void 1888 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 1889 { 1890 struct iwn_eeprom_enhinfo enhinfo[35]; 1891 uint16_t val, base; 1892 int8_t maxpwr; 1893 int i; 1894 1895 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1896 base = le16toh(val); 1897 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 1898 enhinfo, sizeof enhinfo); 1899 1900 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr); 1901 for (i = 0; i < nitems(enhinfo); i++) { 1902 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0) 1903 continue; /* Skip invalid entries. */ 1904 1905 maxpwr = 0; 1906 if (sc->txchainmask & IWN_ANT_A) 1907 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 1908 if (sc->txchainmask & IWN_ANT_B) 1909 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 1910 if (sc->txchainmask & IWN_ANT_C) 1911 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 1912 if (sc->ntxchains == 2) 1913 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 1914 else if (sc->ntxchains == 3) 1915 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 1916 maxpwr /= 2; /* Convert half-dBm to dBm. */ 1917 1918 DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i, 1919 maxpwr); 1920 sc->enh_maxpwr[i] = maxpwr; 1921 } 1922 } 1923 1924 static struct ieee80211_node * 1925 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1926 { 1927 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 1928 } 1929 1930 static int 1931 iwn_media_change(struct ifnet *ifp) 1932 { 1933 int error = ieee80211_media_change(ifp); 1934 /* NB: only the fixed rate can change and that doesn't need a reset */ 1935 return (error == ENETRESET ? 
0 : error); 1936 } 1937 1938 static int 1939 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1940 { 1941 struct iwn_vap *ivp = IWN_VAP(vap); 1942 struct ieee80211com *ic = vap->iv_ic; 1943 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1944 int error; 1945 1946 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1947 ieee80211_state_name[vap->iv_state], 1948 ieee80211_state_name[nstate]); 1949 1950 IEEE80211_UNLOCK(ic); 1951 IWN_LOCK(sc); 1952 callout_stop(&sc->sc_timer_to); 1953 1954 switch (nstate) { 1955 case IEEE80211_S_ASSOC: 1956 if (vap->iv_state != IEEE80211_S_RUN) 1957 break; 1958 /* FALLTHROUGH */ 1959 case IEEE80211_S_AUTH: 1960 if (vap->iv_state == IEEE80211_S_AUTH) 1961 break; 1962 1963 /* 1964 * !AUTH -> AUTH transition requires state reset to handle 1965 * reassociations correctly. 1966 */ 1967 sc->rxon.associd = 0; 1968 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1969 iwn_calib_reset(sc); 1970 error = iwn_auth(sc, vap); 1971 break; 1972 1973 case IEEE80211_S_RUN: 1974 /* 1975 * RUN -> RUN transition; Just restart the timers. 1976 */ 1977 if (vap->iv_state == IEEE80211_S_RUN && 1978 vap->iv_opmode != IEEE80211_M_MONITOR) { 1979 iwn_calib_reset(sc); 1980 break; 1981 } 1982 1983 /* 1984 * !RUN -> RUN requires setting the association id 1985 * which is done with a firmware cmd. We also defer 1986 * starting the timers until that work is done. 1987 */ 1988 error = iwn_run(sc, vap); 1989 break; 1990 1991 default: 1992 break; 1993 } 1994 IWN_UNLOCK(sc); 1995 IEEE80211_LOCK(ic); 1996 return ivp->iv_newstate(vap, nstate, arg); 1997 } 1998 1999 /* 2000 * Process an RX_PHY firmware notification. This is usually immediately 2001 * followed by an MPDU_RX_DONE notification. 2002 */ 2003 static void 2004 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2005 struct iwn_rx_data *data) 2006 { 2007 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2008 2009 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2010 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2011 2012 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2013 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2014 sc->last_rx_valid = 1; 2015 } 2016 2017 static void 2018 iwn_timer_timeout(void *arg) 2019 { 2020 struct iwn_softc *sc = arg; 2021 uint32_t flags = 0; 2022 2023 IWN_LOCK_ASSERT(sc); 2024 2025 if (sc->calib_cnt && --sc->calib_cnt == 0) { 2026 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2027 "send statistics request"); 2028 (void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2029 sizeof flags, 1); 2030 sc->calib_cnt = 60; /* do calibration every 60s */ 2031 } 2032 iwn_watchdog(sc); /* NB: piggyback tx watchdog */ 2033 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 2034 } 2035 2036 static void 2037 iwn_calib_reset(struct iwn_softc *sc) 2038 { 2039 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 2040 sc->calib_cnt = 60; /* do calibration every 60s */ 2041 } 2042 2043 /* 2044 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2045 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 
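 * The two notifications form a small handshake: RX_PHY carries only the
 * PHY-layer statistics, which iwn_rx_phy() caches in sc->last_rx_stat and
 * flags via sc->last_rx_valid; the MPDU_RX_DONE that follows carries the
 * frame itself and consumes the cached statistics.  If an MPDU_RX_DONE
 * arrives without a preceding RX_PHY the frame cannot be rated and is
 * dropped (counted in if_ierrors).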
2046 */ 2047 static void 2048 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2049 struct iwn_rx_data *data) 2050 { 2051 const struct iwn_hal *hal = sc->sc_hal; 2052 struct ifnet *ifp = sc->sc_ifp; 2053 struct ieee80211com *ic = ifp->if_l2com; 2054 struct iwn_rx_ring *ring = &sc->rxq; 2055 struct ieee80211_frame *wh; 2056 struct ieee80211_node *ni; 2057 struct mbuf *m, *m1; 2058 struct iwn_rx_stat *stat; 2059 caddr_t head; 2060 bus_addr_t paddr; 2061 uint32_t flags; 2062 int error, len, rssi, nf; 2063 2064 if (desc->type == IWN_MPDU_RX_DONE) { 2065 /* Check for prior RX_PHY notification. */ 2066 if (!sc->last_rx_valid) { 2067 DPRINTF(sc, IWN_DEBUG_ANY, 2068 "%s: missing RX_PHY\n", __func__); 2069 ifp->if_ierrors++; 2070 return; 2071 } 2072 sc->last_rx_valid = 0; 2073 stat = &sc->last_rx_stat; 2074 } else 2075 stat = (struct iwn_rx_stat *)(desc + 1); 2076 2077 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2078 2079 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2080 device_printf(sc->sc_dev, 2081 "%s: invalid rx statistic header, len %d\n", 2082 __func__, stat->cfg_phy_len); 2083 ifp->if_ierrors++; 2084 return; 2085 } 2086 if (desc->type == IWN_MPDU_RX_DONE) { 2087 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2088 head = (caddr_t)(mpdu + 1); 2089 len = le16toh(mpdu->len); 2090 } else { 2091 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2092 len = le16toh(stat->len); 2093 } 2094 2095 flags = le32toh(*(uint32_t *)(head + len)); 2096 2097 /* Discard frames with a bad FCS early. */ 2098 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2099 DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n", 2100 __func__, flags); 2101 ifp->if_ierrors++; 2102 return; 2103 } 2104 /* Discard frames that are too short. */ 2105 if (len < sizeof (*wh)) { 2106 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2107 __func__, len); 2108 ifp->if_ierrors++; 2109 return; 2110 } 2111 2112 /* XXX don't need mbuf, just dma buffer */ 2113 m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 2114 if (m1 == NULL) { 2115 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2116 __func__); 2117 ifp->if_ierrors++; 2118 return; 2119 } 2120 bus_dmamap_unload(ring->data_dmat, data->map); 2121 2122 error = bus_dmamap_load(ring->data_dmat, data->map, 2123 mtod(m1, caddr_t), MJUMPAGESIZE, 2124 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2125 if (error != 0 && error != EFBIG) { 2126 device_printf(sc->sc_dev, 2127 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2128 m_freem(m1); 2129 ifp->if_ierrors++; 2130 return; 2131 } 2132 2133 m = data->m; 2134 data->m = m1; 2135 /* Update RX descriptor. */ 2136 ring->desc[ring->cur] = htole32(paddr >> 8); 2137 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2138 BUS_DMASYNC_PREWRITE); 2139 2140 /* Finalize mbuf. */ 2141 m->m_pkthdr.rcvif = ifp; 2142 m->m_data = head; 2143 m->m_pkthdr.len = m->m_len = len; 2144 2145 rssi = hal->get_rssi(sc, stat); 2146 2147 /* Grab a reference to the source node. */ 2148 wh = mtod(m, struct ieee80211_frame *); 2149 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2150 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2151 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; 2152 2153 if (ieee80211_radiotap_active(ic)) { 2154 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2155 2156 tap->wr_tsft = htole64(stat->tstamp); 2157 tap->wr_flags = 0; 2158 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2159 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2160 switch (stat->rate) { 2161 /* CCK rates. */ 2162 case 10: tap->wr_rate = 2; break; 2163 case 20: tap->wr_rate = 4; break; 2164 case 55: tap->wr_rate = 11; break; 2165 case 110: tap->wr_rate = 22; break; 2166 /* OFDM rates. */ 2167 case 0xd: tap->wr_rate = 12; break; 2168 case 0xf: tap->wr_rate = 18; break; 2169 case 0x5: tap->wr_rate = 24; break; 2170 case 0x7: tap->wr_rate = 36; break; 2171 case 0x9: tap->wr_rate = 48; break; 2172 case 0xb: tap->wr_rate = 72; break; 2173 case 0x1: tap->wr_rate = 96; break; 2174 case 0x3: tap->wr_rate = 108; break; 2175 /* Unknown rate: should not happen. */ 2176 default: tap->wr_rate = 0; 2177 } 2178 tap->wr_dbm_antsignal = rssi; 2179 tap->wr_dbm_antnoise = nf; 2180 } 2181 2182 IWN_UNLOCK(sc); 2183 2184 /* Send the frame to the 802.11 layer. */ 2185 if (ni != NULL) { 2186 (void) ieee80211_input(ni, m, rssi - nf, nf); 2187 /* Node is no longer needed. */ 2188 ieee80211_free_node(ni); 2189 } else 2190 (void) ieee80211_input_all(ic, m, rssi - nf, nf); 2191 2192 IWN_LOCK(sc); 2193 } 2194 2195 #if 0 /* HT */ 2196 /* Process an incoming Compressed BlockAck. */ 2197 static void 2198 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2199 struct iwn_rx_data *data) 2200 { 2201 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2202 struct iwn_tx_ring *txq; 2203 2204 txq = &sc->txq[letoh16(ba->qid)]; 2205 /* XXX TBD */ 2206 } 2207 #endif 2208 2209 /* 2210 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2211 * The latter is sent by the firmware after each received beacon. 2212 */ 2213 static void 2214 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2215 struct iwn_rx_data *data) 2216 { 2217 const struct iwn_hal *hal = sc->sc_hal; 2218 struct ifnet *ifp = sc->sc_ifp; 2219 struct ieee80211com *ic = ifp->if_l2com; 2220 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2221 struct iwn_calib_state *calib = &sc->calib; 2222 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2223 int temp; 2224 2225 /* Beacon stats are meaningful only when associated and not scanning. */ 2226 if (vap->iv_state != IEEE80211_S_RUN || 2227 (ic->ic_flags & IEEE80211_F_SCAN)) 2228 return; 2229 2230 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2231 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type); 2232 iwn_calib_reset(sc); /* Reset TX power calibration timeout. */ 2233 2234 /* Test if temperature has changed. */ 2235 if (stats->general.temp != sc->rawtemp) { 2236 /* Convert "raw" temperature to degC. */ 2237 sc->rawtemp = stats->general.temp; 2238 temp = hal->get_temperature(sc); 2239 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 2240 __func__, temp); 2241 2242 /* Update TX power if need be (4965AGN only.) */ 2243 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2244 iwn4965_power_calibration(sc, temp); 2245 } 2246 2247 if (desc->type != IWN_BEACON_STATISTICS) 2248 return; /* Reply to a statistics request. */ 2249 2250 sc->noise = iwn_get_noise(&stats->rx.general); 2251 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 2252 2253 /* Test that RSSI and noise are present in stats report. 
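 * A flags value of 1 in the general RX statistics appears to indicate
 * that the RSSI and noise figures are valid; anything else means the
 * numbers cannot be fed to noise collection or sensitivity tuning, so
 * bail out early.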
*/ 2254 if (le32toh(stats->rx.general.flags) != 1) { 2255 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 2256 "received statistics without RSSI"); 2257 return; 2258 } 2259 2260 if (calib->state == IWN_CALIB_STATE_ASSOC) 2261 iwn_collect_noise(sc, &stats->rx.general); 2262 else if (calib->state == IWN_CALIB_STATE_RUN) 2263 iwn_tune_sensitivity(sc, &stats->rx); 2264 } 2265 2266 /* 2267 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2268 * and 5000 adapters have different incompatible TX status formats. 2269 */ 2270 static void 2271 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2272 struct iwn_rx_data *data) 2273 { 2274 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2275 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2276 2277 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2278 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2279 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2280 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2281 le32toh(stat->status)); 2282 2283 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2284 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); 2285 } 2286 2287 static void 2288 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2289 struct iwn_rx_data *data) 2290 { 2291 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2292 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2293 2294 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2295 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2296 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2297 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2298 le32toh(stat->status)); 2299 2300 #ifdef notyet 2301 /* Reset TX scheduler slot. */ 2302 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2303 #endif 2304 2305 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2306 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); 2307 } 2308 2309 /* 2310 * Adapter-independent backend for TX_DONE firmware notifications. 2311 */ 2312 static void 2313 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2314 uint8_t status) 2315 { 2316 struct ifnet *ifp = sc->sc_ifp; 2317 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2318 struct iwn_tx_data *data = &ring->data[desc->idx]; 2319 struct mbuf *m; 2320 struct ieee80211_node *ni; 2321 struct ieee80211vap *vap; 2322 2323 KASSERT(data->ni != NULL, ("no node")); 2324 2325 /* Unmap and free mbuf. */ 2326 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2327 bus_dmamap_unload(ring->data_dmat, data->map); 2328 m = data->m, data->m = NULL; 2329 ni = data->ni, data->ni = NULL; 2330 vap = ni->ni_vap; 2331 2332 if (m->m_flags & M_TXCB) { 2333 /* 2334 * Channels marked for "radar" require traffic to be received 2335 * to unlock before we can transmit. Until traffic is seen 2336 * any attempt to transmit is returned immediately with status 2337 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 2338 * happen on first authenticate after scanning. To workaround 2339 * this we ignore a failure of this sort in AUTH state so the 2340 * 802.11 layer will fall back to using a timeout to wait for 2341 * the AUTH reply. This allows the firmware time to see 2342 * traffic so a subsequent retry of AUTH succeeds. 
It's 2343 * unclear why the firmware does not maintain state for 2344 * channels recently visited as this would allow immediate 2345 * use of the channel after a scan (where we see traffic). 2346 */ 2347 if (status == IWN_TX_FAIL_TX_LOCKED && 2348 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 2349 ieee80211_process_callback(ni, m, 0); 2350 else 2351 ieee80211_process_callback(ni, m, 2352 (status & IWN_TX_FAIL) != 0); 2353 } 2354 2355 /* 2356 * Update rate control statistics for the node. 2357 */ 2358 if (status & 0x80) { 2359 ifp->if_oerrors++; 2360 ieee80211_ratectl_tx_complete(vap, ni, 2361 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2362 } else { 2363 ieee80211_ratectl_tx_complete(vap, ni, 2364 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2365 } 2366 m_freem(m); 2367 ieee80211_free_node(ni); 2368 2369 sc->sc_tx_timer = 0; 2370 if (--ring->queued < IWN_TX_RING_LOMARK) { 2371 sc->qfullmsk &= ~(1 << ring->qid); 2372 if (sc->qfullmsk == 0 && 2373 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2374 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2375 iwn_start_locked(ifp); 2376 } 2377 } 2378 } 2379 2380 /* 2381 * Process a "command done" firmware notification. This is where we wakeup 2382 * processes waiting for a synchronous command completion. 2383 */ 2384 static void 2385 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2386 { 2387 struct iwn_tx_ring *ring = &sc->txq[4]; 2388 struct iwn_tx_data *data; 2389 2390 if ((desc->qid & 0xf) != 4) 2391 return; /* Not a command ack. */ 2392 2393 data = &ring->data[desc->idx]; 2394 2395 /* If the command was mapped in an mbuf, free it. */ 2396 if (data->m != NULL) { 2397 bus_dmamap_unload(ring->data_dmat, data->map); 2398 m_freem(data->m); 2399 data->m = NULL; 2400 } 2401 wakeup(&ring->desc[desc->idx]); 2402 } 2403 2404 /* 2405 * Process an INT_FH_RX or INT_SW_RX interrupt. 2406 */ 2407 static void 2408 iwn_notif_intr(struct iwn_softc *sc) 2409 { 2410 struct ifnet *ifp = sc->sc_ifp; 2411 struct ieee80211com *ic = ifp->if_l2com; 2412 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2413 uint16_t hw; 2414 2415 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 2416 BUS_DMASYNC_POSTREAD); 2417 2418 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2419 while (sc->rxq.cur != hw) { 2420 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2421 struct iwn_rx_desc *desc; 2422 2423 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2424 BUS_DMASYNC_POSTREAD); 2425 desc = mtod(data->m, struct iwn_rx_desc *); 2426 2427 DPRINTF(sc, IWN_DEBUG_RECV, 2428 "%s: qid %x idx %d flags %x type %d(%s) len %d\n", 2429 __func__, desc->qid & 0xf, desc->idx, desc->flags, 2430 desc->type, iwn_intr_str(desc->type), 2431 le16toh(desc->len)); 2432 2433 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2434 iwn_cmd_done(sc, desc); 2435 2436 switch (desc->type) { 2437 case IWN_RX_PHY: 2438 iwn_rx_phy(sc, desc, data); 2439 break; 2440 2441 case IWN_RX_DONE: /* 4965AGN only. */ 2442 case IWN_MPDU_RX_DONE: 2443 /* An 802.11 frame has been received. */ 2444 iwn_rx_done(sc, desc, data); 2445 break; 2446 2447 #if 0 /* HT */ 2448 case IWN_RX_COMPRESSED_BA: 2449 /* A Compressed BlockAck has been received. */ 2450 iwn_rx_compressed_ba(sc, desc, data); 2451 break; 2452 #endif 2453 2454 case IWN_TX_DONE: 2455 /* An 802.11 frame has been transmitted. 
*/ 2456 sc->sc_hal->tx_done(sc, desc, data); 2457 break; 2458 2459 case IWN_RX_STATISTICS: 2460 case IWN_BEACON_STATISTICS: 2461 iwn_rx_statistics(sc, desc, data); 2462 break; 2463 2464 case IWN_BEACON_MISSED: 2465 { 2466 struct iwn_beacon_missed *miss = 2467 (struct iwn_beacon_missed *)(desc + 1); 2468 int misses; 2469 2470 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2471 BUS_DMASYNC_POSTREAD); 2472 misses = le32toh(miss->consecutive); 2473 2474 /* XXX not sure why we're notified w/ zero */ 2475 if (misses == 0) 2476 break; 2477 DPRINTF(sc, IWN_DEBUG_STATE, 2478 "%s: beacons missed %d/%d\n", __func__, 2479 misses, le32toh(miss->total)); 2480 2481 /* 2482 * If more than 5 consecutive beacons are missed, 2483 * reinitialize the sensitivity state machine. 2484 */ 2485 if (vap->iv_state == IEEE80211_S_RUN && misses > 5) 2486 (void) iwn_init_sensitivity(sc); 2487 if (misses >= vap->iv_bmissthreshold) { 2488 IWN_UNLOCK(sc); 2489 ieee80211_beacon_miss(ic); 2490 IWN_LOCK(sc); 2491 } 2492 break; 2493 } 2494 case IWN_UC_READY: 2495 { 2496 struct iwn_ucode_info *uc = 2497 (struct iwn_ucode_info *)(desc + 1); 2498 2499 /* The microcontroller is ready. */ 2500 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2501 BUS_DMASYNC_POSTREAD); 2502 DPRINTF(sc, IWN_DEBUG_RESET, 2503 "microcode alive notification version=%d.%d " 2504 "subtype=%x alive=%x\n", uc->major, uc->minor, 2505 uc->subtype, le32toh(uc->valid)); 2506 2507 if (le32toh(uc->valid) != 1) { 2508 device_printf(sc->sc_dev, 2509 "microcontroller initialization failed"); 2510 break; 2511 } 2512 if (uc->subtype == IWN_UCODE_INIT) { 2513 /* Save microcontroller report. */ 2514 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2515 } 2516 /* Save the address of the error log in SRAM. */ 2517 sc->errptr = le32toh(uc->errptr); 2518 break; 2519 } 2520 case IWN_STATE_CHANGED: 2521 { 2522 uint32_t *status = (uint32_t *)(desc + 1); 2523 2524 /* 2525 * State change allows hardware switch change to be 2526 * noted. However, we handle this in iwn_intr as we 2527 * get both the enable/disble intr. 2528 */ 2529 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2530 BUS_DMASYNC_POSTREAD); 2531 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n", 2532 le32toh(*status)); 2533 break; 2534 } 2535 case IWN_START_SCAN: 2536 { 2537 struct iwn_start_scan *scan = 2538 (struct iwn_start_scan *)(desc + 1); 2539 2540 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2541 BUS_DMASYNC_POSTREAD); 2542 DPRINTF(sc, IWN_DEBUG_ANY, 2543 "%s: scanning channel %d status %x\n", 2544 __func__, scan->chan, le32toh(scan->status)); 2545 break; 2546 } 2547 case IWN_STOP_SCAN: 2548 { 2549 struct iwn_stop_scan *scan = 2550 (struct iwn_stop_scan *)(desc + 1); 2551 2552 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2553 BUS_DMASYNC_POSTREAD); 2554 DPRINTF(sc, IWN_DEBUG_STATE, 2555 "scan finished nchan=%d status=%d chan=%d\n", 2556 scan->nchan, scan->status, scan->chan); 2557 2558 IWN_UNLOCK(sc); 2559 ieee80211_scan_next(vap); 2560 IWN_LOCK(sc); 2561 break; 2562 } 2563 case IWN5000_CALIBRATION_RESULT: 2564 iwn5000_rx_calib_result(sc, desc, data); 2565 break; 2566 2567 case IWN5000_CALIBRATION_DONE: 2568 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2569 wakeup(sc); 2570 break; 2571 } 2572 2573 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2574 } 2575 2576 /* Tell the firmware what we have processed. */ 2577 hw = (hw == 0) ? 
IWN_RX_RING_COUNT - 1 : hw - 1; 2578 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2579 } 2580 2581 /* 2582 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2583 * from power-down sleep mode. 2584 */ 2585 static void 2586 iwn_wakeup_intr(struct iwn_softc *sc) 2587 { 2588 int qid; 2589 2590 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 2591 __func__); 2592 2593 /* Wakeup RX and TX rings. */ 2594 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2595 for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) { 2596 struct iwn_tx_ring *ring = &sc->txq[qid]; 2597 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2598 } 2599 } 2600 2601 static void 2602 iwn_rftoggle_intr(struct iwn_softc *sc) 2603 { 2604 struct ifnet *ifp = sc->sc_ifp; 2605 struct ieee80211com *ic = ifp->if_l2com; 2606 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 2607 2608 IWN_LOCK_ASSERT(sc); 2609 2610 device_printf(sc->sc_dev, "RF switch: radio %s\n", 2611 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2612 if (tmp & IWN_GP_CNTRL_RFKILL) 2613 ieee80211_runtask(ic, &sc->sc_radioon_task); 2614 else 2615 ieee80211_runtask(ic, &sc->sc_radiooff_task); 2616 } 2617 2618 /* 2619 * Dump the error log of the firmware when a firmware panic occurs. Although 2620 * we can't debug the firmware because it is neither open source nor free, it 2621 * can help us to identify certain classes of problems. 2622 */ 2623 static void 2624 iwn_fatal_intr(struct iwn_softc *sc) 2625 { 2626 const struct iwn_hal *hal = sc->sc_hal; 2627 struct iwn_fw_dump dump; 2628 int i; 2629 2630 IWN_LOCK_ASSERT(sc); 2631 2632 /* Force a complete recalibration on next init. */ 2633 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2634 2635 /* Check that the error log address is valid. */ 2636 if (sc->errptr < IWN_FW_DATA_BASE || 2637 sc->errptr + sizeof (dump) > 2638 IWN_FW_DATA_BASE + hal->fw_data_maxsz) { 2639 printf("%s: bad firmware error log address 0x%08x\n", 2640 __func__, sc->errptr); 2641 return; 2642 } 2643 if (iwn_nic_lock(sc) != 0) { 2644 printf("%s: could not read firmware error log\n", 2645 __func__); 2646 return; 2647 } 2648 /* Read firmware error log from SRAM. */ 2649 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2650 sizeof (dump) / sizeof (uint32_t)); 2651 iwn_nic_unlock(sc); 2652 2653 if (dump.valid == 0) { 2654 printf("%s: firmware error log is empty\n", 2655 __func__); 2656 return; 2657 } 2658 printf("firmware error log:\n"); 2659 printf(" error type = \"%s\" (0x%08X)\n", 2660 (dump.id < nitems(iwn_fw_errmsg)) ? 2661 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2662 dump.id); 2663 printf(" program counter = 0x%08X\n", dump.pc); 2664 printf(" source line = 0x%08X\n", dump.src_line); 2665 printf(" error data = 0x%08X%08X\n", 2666 dump.error_data[0], dump.error_data[1]); 2667 printf(" branch link = 0x%08X%08X\n", 2668 dump.branch_link[0], dump.branch_link[1]); 2669 printf(" interrupt link = 0x%08X%08X\n", 2670 dump.interrupt_link[0], dump.interrupt_link[1]); 2671 printf(" time = %u\n", dump.time[0]); 2672 2673 /* Dump driver status (TX and RX rings) while we're here. 
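 * For each TX queue this prints the hardware queue id (qid), the next
 * slot the driver will fill (cur) and the number of frames still waiting
 * for a TX_DONE (queued), which helps tie a firmware error to a
 * particular stuck queue.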
*/ 2674 printf("driver status:\n"); 2675 for (i = 0; i < hal->ntxqs; i++) { 2676 struct iwn_tx_ring *ring = &sc->txq[i]; 2677 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2678 i, ring->qid, ring->cur, ring->queued); 2679 } 2680 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2681 } 2682 2683 static void 2684 iwn_intr(void *arg) 2685 { 2686 struct iwn_softc *sc = arg; 2687 struct ifnet *ifp = sc->sc_ifp; 2688 uint32_t r1, r2, tmp; 2689 2690 IWN_LOCK(sc); 2691 2692 /* Disable interrupts. */ 2693 IWN_WRITE(sc, IWN_INT_MASK, 0); 2694 2695 /* Read interrupts from ICT (fast) or from registers (slow). */ 2696 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2697 tmp = 0; 2698 while (sc->ict[sc->ict_cur] != 0) { 2699 tmp |= sc->ict[sc->ict_cur]; 2700 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2701 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2702 } 2703 tmp = le32toh(tmp); 2704 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2705 tmp = 0; 2706 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2707 tmp |= 0x8000; 2708 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2709 r2 = 0; /* Unused. */ 2710 } else { 2711 r1 = IWN_READ(sc, IWN_INT); 2712 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2713 return; /* Hardware gone! */ 2714 r2 = IWN_READ(sc, IWN_FH_INT); 2715 } 2716 2717 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2); 2718 2719 if (r1 == 0 && r2 == 0) 2720 goto done; /* Interrupt not for us. */ 2721 2722 /* Acknowledge interrupts. */ 2723 IWN_WRITE(sc, IWN_INT, r1); 2724 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2725 IWN_WRITE(sc, IWN_FH_INT, r2); 2726 2727 if (r1 & IWN_INT_RF_TOGGLED) { 2728 iwn_rftoggle_intr(sc); 2729 goto done; 2730 } 2731 if (r1 & IWN_INT_CT_REACHED) { 2732 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 2733 __func__); 2734 } 2735 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2736 iwn_fatal_intr(sc); 2737 ifp->if_flags &= ~IFF_UP; 2738 iwn_stop_locked(sc); 2739 goto done; 2740 } 2741 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2742 (r2 & IWN_FH_INT_RX)) { 2743 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2744 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2745 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2746 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2747 IWN_INT_PERIODIC_DIS); 2748 iwn_notif_intr(sc); 2749 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2750 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2751 IWN_INT_PERIODIC_ENA); 2752 } 2753 } else 2754 iwn_notif_intr(sc); 2755 } 2756 2757 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2758 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2759 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2760 wakeup(sc); /* FH DMA transfer completed. */ 2761 } 2762 2763 if (r1 & IWN_INT_ALIVE) 2764 wakeup(sc); /* Firmware is alive. */ 2765 2766 if (r1 & IWN_INT_WAKEUP) 2767 iwn_wakeup_intr(sc); 2768 2769 done: 2770 /* Re-enable interrupts. */ 2771 if (ifp->if_flags & IFF_UP) 2772 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2773 2774 IWN_UNLOCK(sc); 2775 } 2776 2777 /* 2778 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2779 * 5000 adapters use a slightly different format.) 
2780 */ 2781 static void 2782 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2783 uint16_t len) 2784 { 2785 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2786 2787 *w = htole16(len + 8); 2788 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2789 BUS_DMASYNC_PREWRITE); 2790 if (idx < IWN_SCHED_WINSZ) { 2791 *(w + IWN_TX_RING_COUNT) = *w; 2792 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2793 BUS_DMASYNC_PREWRITE); 2794 } 2795 } 2796 2797 static void 2798 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2799 uint16_t len) 2800 { 2801 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2802 2803 *w = htole16(id << 12 | (len + 8)); 2804 2805 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2806 BUS_DMASYNC_PREWRITE); 2807 if (idx < IWN_SCHED_WINSZ) { 2808 *(w + IWN_TX_RING_COUNT) = *w; 2809 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2810 BUS_DMASYNC_PREWRITE); 2811 } 2812 } 2813 2814 #ifdef notyet 2815 static void 2816 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2817 { 2818 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2819 2820 *w = (*w & htole16(0xf000)) | htole16(1); 2821 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2822 BUS_DMASYNC_PREWRITE); 2823 if (idx < IWN_SCHED_WINSZ) { 2824 *(w + IWN_TX_RING_COUNT) = *w; 2825 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2826 BUS_DMASYNC_PREWRITE); 2827 } 2828 } 2829 #endif 2830 2831 static uint8_t 2832 iwn_plcp_signal(int rate) { 2833 int i; 2834 2835 for (i = 0; i < IWN_RIDX_MAX + 1; i++) { 2836 if (rate == iwn_rates[i].rate) 2837 return i; 2838 } 2839 2840 return 0; 2841 } 2842 2843 static int 2844 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 2845 struct iwn_tx_ring *ring) 2846 { 2847 const struct iwn_hal *hal = sc->sc_hal; 2848 const struct ieee80211_txparam *tp; 2849 const struct iwn_rate *rinfo; 2850 struct ieee80211vap *vap = ni->ni_vap; 2851 struct ieee80211com *ic = ni->ni_ic; 2852 struct iwn_node *wn = (void *)ni; 2853 struct iwn_tx_desc *desc; 2854 struct iwn_tx_data *data; 2855 struct iwn_tx_cmd *cmd; 2856 struct iwn_cmd_data *tx; 2857 struct ieee80211_frame *wh; 2858 struct ieee80211_key *k = NULL; 2859 struct mbuf *mnew; 2860 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 2861 uint32_t flags; 2862 u_int hdrlen; 2863 int totlen, error, pad, nsegs = 0, i, rate; 2864 uint8_t ridx, type, txant; 2865 2866 IWN_LOCK_ASSERT(sc); 2867 2868 wh = mtod(m, struct ieee80211_frame *); 2869 hdrlen = ieee80211_anyhdrsize(wh); 2870 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2871 2872 desc = &ring->desc[ring->cur]; 2873 data = &ring->data[ring->cur]; 2874 2875 /* Choose a TX rate index. */ 2876 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 2877 if (type == IEEE80211_FC0_TYPE_MGT) 2878 rate = tp->mgmtrate; 2879 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 2880 rate = tp->mcastrate; 2881 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2882 rate = tp->ucastrate; 2883 else { 2884 /* XXX pass pktlen */ 2885 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2886 rate = ni->ni_txrate; 2887 } 2888 ridx = iwn_plcp_signal(rate); 2889 rinfo = &iwn_rates[ridx]; 2890 2891 /* Encrypt the frame if need be. */ 2892 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2893 k = ieee80211_crypto_encap(ni, m); 2894 if (k == NULL) { 2895 m_freem(m); 2896 return ENOBUFS; 2897 } 2898 /* Packet header may have moved, reset our local pointer. 
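 * (Encapsulation prepends the IV and may shift the 802.11 header within
 * the mbuf, so the wh pointer computed earlier can go stale; it is
 * refreshed with mtod() below before the header is copied into the TX
 * command.)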
*/ 2899 wh = mtod(m, struct ieee80211_frame *); 2900 } 2901 totlen = m->m_pkthdr.len; 2902 2903 if (ieee80211_radiotap_active_vap(vap)) { 2904 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2905 2906 tap->wt_flags = 0; 2907 tap->wt_rate = rinfo->rate; 2908 if (k != NULL) 2909 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2910 2911 ieee80211_radiotap_tx(vap, m); 2912 } 2913 2914 /* Prepare TX firmware command. */ 2915 cmd = &ring->cmd[ring->cur]; 2916 cmd->code = IWN_CMD_TX_DATA; 2917 cmd->flags = 0; 2918 cmd->qid = ring->qid; 2919 cmd->idx = ring->cur; 2920 2921 tx = (struct iwn_cmd_data *)cmd->data; 2922 /* NB: No need to clear tx, all fields are reinitialized here. */ 2923 tx->scratch = 0; /* clear "scratch" area */ 2924 2925 flags = 0; 2926 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) 2927 flags |= IWN_TX_NEED_ACK; 2928 if ((wh->i_fc[0] & 2929 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2930 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2931 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 2932 2933 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2934 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 2935 2936 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2937 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2938 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2939 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2940 flags |= IWN_TX_NEED_RTS; 2941 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2942 ridx >= IWN_RIDX_OFDM6) { 2943 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2944 flags |= IWN_TX_NEED_CTS; 2945 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2946 flags |= IWN_TX_NEED_RTS; 2947 } 2948 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 2949 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2950 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 2951 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 2952 flags |= IWN_TX_NEED_PROTECTION; 2953 } else 2954 flags |= IWN_TX_FULL_TXOP; 2955 } 2956 } 2957 2958 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2959 type != IEEE80211_FC0_TYPE_DATA) 2960 tx->id = hal->broadcast_id; 2961 else 2962 tx->id = wn->id; 2963 2964 if (type == IEEE80211_FC0_TYPE_MGT) { 2965 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2966 2967 /* Tell HW to set timestamp in probe responses. */ 2968 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2969 flags |= IWN_TX_INSERT_TSTAMP; 2970 2971 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2972 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2973 tx->timeout = htole16(3); 2974 else 2975 tx->timeout = htole16(2); 2976 } else 2977 tx->timeout = htole16(0); 2978 2979 if (hdrlen & 3) { 2980 /* First segment length must be a multiple of 4. */ 2981 flags |= IWN_TX_NEED_PADDING; 2982 pad = 4 - (hdrlen & 3); 2983 } else 2984 pad = 0; 2985 2986 tx->len = htole16(totlen); 2987 tx->tid = 0; 2988 tx->rts_ntries = 60; 2989 tx->data_ntries = 15; 2990 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 2991 tx->plcp = rinfo->plcp; 2992 tx->rflags = rinfo->flags; 2993 if (tx->id == hal->broadcast_id) { 2994 /* Group or management frame. */ 2995 tx->linkq = 0; 2996 /* XXX Alternate between antenna A and B? */ 2997 txant = IWN_LSB(sc->txchainmask); 2998 tx->rflags |= IWN_RFLAG_ANT(txant); 2999 } else { 3000 tx->linkq = IWN_RIDX_OFDM54 - ridx; 3001 flags |= IWN_TX_LINKQ; /* enable MRR */ 3002 } 3003 3004 /* Set physical address of "scratch area". 
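 * (The bus address is split with IWN_LOADDR()/IWN_HIADDR(): the low
 * 32 bits go in loaddr and the remaining high-order bits in hiaddr, the
 * same extended-address encoding used for the DMA segments of the TX
 * descriptor below.)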
*/ 3005 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3006 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3007 3008 /* Copy 802.11 header in TX command. */ 3009 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3010 3011 /* Trim 802.11 header. */ 3012 m_adj(m, hdrlen); 3013 tx->security = 0; 3014 tx->flags = htole32(flags); 3015 3016 if (m->m_len > 0) { 3017 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 3018 m, segs, &nsegs, BUS_DMA_NOWAIT); 3019 if (error == EFBIG) { 3020 /* too many fragments, linearize */ 3021 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3022 if (mnew == NULL) { 3023 device_printf(sc->sc_dev, 3024 "%s: could not defrag mbuf\n", __func__); 3025 m_freem(m); 3026 return ENOBUFS; 3027 } 3028 m = mnew; 3029 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, 3030 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); 3031 } 3032 if (error != 0) { 3033 device_printf(sc->sc_dev, 3034 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", 3035 __func__, error); 3036 m_freem(m); 3037 return error; 3038 } 3039 } 3040 3041 data->m = m; 3042 data->ni = ni; 3043 3044 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3045 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3046 3047 /* Fill TX descriptor. */ 3048 desc->nsegs = 1 + nsegs; 3049 /* First DMA segment is used by the TX command. */ 3050 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3051 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3052 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3053 /* Other DMA segments are for data payload. */ 3054 for (i = 1; i <= nsegs; i++) { 3055 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 3056 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 3057 segs[i - 1].ds_len << 4); 3058 } 3059 3060 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3061 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3062 BUS_DMASYNC_PREWRITE); 3063 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3064 BUS_DMASYNC_PREWRITE); 3065 3066 #ifdef notyet 3067 /* Update TX scheduler. */ 3068 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3069 #endif 3070 3071 /* Kick TX ring. */ 3072 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3073 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3074 3075 /* Mark TX ring as full if we reach a certain threshold. */ 3076 if (++ring->queued > IWN_TX_RING_HIMARK) 3077 sc->qfullmsk |= 1 << ring->qid; 3078 3079 return 0; 3080 } 3081 3082 static int 3083 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 3084 struct ieee80211_node *ni, struct iwn_tx_ring *ring, 3085 const struct ieee80211_bpf_params *params) 3086 { 3087 const struct iwn_hal *hal = sc->sc_hal; 3088 const struct iwn_rate *rinfo; 3089 struct ifnet *ifp = sc->sc_ifp; 3090 struct ieee80211vap *vap = ni->ni_vap; 3091 struct ieee80211com *ic = ifp->if_l2com; 3092 struct iwn_tx_cmd *cmd; 3093 struct iwn_cmd_data *tx; 3094 struct ieee80211_frame *wh; 3095 struct iwn_tx_desc *desc; 3096 struct iwn_tx_data *data; 3097 struct mbuf *mnew; 3098 bus_addr_t paddr; 3099 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 3100 uint32_t flags; 3101 u_int hdrlen; 3102 int totlen, error, pad, nsegs = 0, i, rate; 3103 uint8_t ridx, type, txant; 3104 3105 IWN_LOCK_ASSERT(sc); 3106 3107 wh = mtod(m, struct ieee80211_frame *); 3108 hdrlen = ieee80211_anyhdrsize(wh); 3109 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3110 3111 desc = &ring->desc[ring->cur]; 3112 data = &ring->data[ring->cur]; 3113 3114 /* Choose a TX rate index. 
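 * (In this raw path the rate comes verbatim from the caller's bpf
 * parameters (ibp_rate0); it is only sanity-checked against the current
 * rate table and mapped to a firmware rate index, the rate control
 * module is not consulted.)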
*/ 3115 rate = params->ibp_rate0; 3116 if (!ieee80211_isratevalid(ic->ic_rt, rate)) { 3117 /* XXX fall back to mcast/mgmt rate? */ 3118 m_freem(m); 3119 return EINVAL; 3120 } 3121 ridx = iwn_plcp_signal(rate); 3122 rinfo = &iwn_rates[ridx]; 3123 3124 totlen = m->m_pkthdr.len; 3125 3126 /* Prepare TX firmware command. */ 3127 cmd = &ring->cmd[ring->cur]; 3128 cmd->code = IWN_CMD_TX_DATA; 3129 cmd->flags = 0; 3130 cmd->qid = ring->qid; 3131 cmd->idx = ring->cur; 3132 3133 tx = (struct iwn_cmd_data *)cmd->data; 3134 /* NB: No need to clear tx, all fields are reinitialized here. */ 3135 tx->scratch = 0; /* clear "scratch" area */ 3136 3137 flags = 0; 3138 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 3139 flags |= IWN_TX_NEED_ACK; 3140 if (params->ibp_flags & IEEE80211_BPF_RTS) { 3141 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3142 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3143 flags &= ~IWN_TX_NEED_RTS; 3144 flags |= IWN_TX_NEED_PROTECTION; 3145 } else 3146 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 3147 } 3148 if (params->ibp_flags & IEEE80211_BPF_CTS) { 3149 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3150 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3151 flags &= ~IWN_TX_NEED_CTS; 3152 flags |= IWN_TX_NEED_PROTECTION; 3153 } else 3154 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 3155 } 3156 if (type == IEEE80211_FC0_TYPE_MGT) { 3157 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3158 3159 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3160 flags |= IWN_TX_INSERT_TSTAMP; 3161 3162 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3163 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3164 tx->timeout = htole16(3); 3165 else 3166 tx->timeout = htole16(2); 3167 } else 3168 tx->timeout = htole16(0); 3169 3170 if (hdrlen & 3) { 3171 /* First segment length must be a multiple of 4. */ 3172 flags |= IWN_TX_NEED_PADDING; 3173 pad = 4 - (hdrlen & 3); 3174 } else 3175 pad = 0; 3176 3177 if (ieee80211_radiotap_active_vap(vap)) { 3178 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 3179 3180 tap->wt_flags = 0; 3181 tap->wt_rate = rate; 3182 3183 ieee80211_radiotap_tx(vap, m); 3184 } 3185 3186 tx->len = htole16(totlen); 3187 tx->tid = 0; 3188 tx->id = hal->broadcast_id; 3189 tx->rts_ntries = params->ibp_try1; 3190 tx->data_ntries = params->ibp_try0; 3191 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3192 tx->plcp = rinfo->plcp; 3193 tx->rflags = rinfo->flags; 3194 /* Group or management frame. */ 3195 tx->linkq = 0; 3196 txant = IWN_LSB(sc->txchainmask); 3197 tx->rflags |= IWN_RFLAG_ANT(txant); 3198 /* Set physical address of "scratch area". */ 3199 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd); 3200 tx->loaddr = htole32(IWN_LOADDR(paddr)); 3201 tx->hiaddr = IWN_HIADDR(paddr); 3202 3203 /* Copy 802.11 header in TX command. */ 3204 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3205 3206 /* Trim 802.11 header. */ 3207 m_adj(m, hdrlen); 3208 tx->security = 0; 3209 tx->flags = htole32(flags); 3210 3211 if (m->m_len > 0) { 3212 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 3213 m, segs, &nsegs, BUS_DMA_NOWAIT); 3214 if (error == EFBIG) { 3215 /* Too many fragments, linearize. 
*/ 3216 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3217 if (mnew == NULL) { 3218 device_printf(sc->sc_dev, 3219 "%s: could not defrag mbuf\n", __func__); 3220 m_freem(m); 3221 return ENOBUFS; 3222 } 3223 m = mnew; 3224 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, 3225 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); 3226 } 3227 if (error != 0) { 3228 device_printf(sc->sc_dev, 3229 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", 3230 __func__, error); 3231 m_freem(m); 3232 return error; 3233 } 3234 } 3235 3236 data->m = m; 3237 data->ni = ni; 3238 3239 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3240 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3241 3242 /* Fill TX descriptor. */ 3243 desc->nsegs = 1 + nsegs; 3244 /* First DMA segment is used by the TX command. */ 3245 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3246 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3247 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3248 /* Other DMA segments are for data payload. */ 3249 for (i = 1; i <= nsegs; i++) { 3250 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 3251 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 3252 segs[i - 1].ds_len << 4); 3253 } 3254 3255 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3256 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3257 BUS_DMASYNC_PREWRITE); 3258 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3259 BUS_DMASYNC_PREWRITE); 3260 3261 #ifdef notyet 3262 /* Update TX scheduler. */ 3263 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3264 #endif 3265 3266 /* Kick TX ring. */ 3267 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3268 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3269 3270 /* Mark TX ring as full if we reach a certain threshold. */ 3271 if (++ring->queued > IWN_TX_RING_HIMARK) 3272 sc->qfullmsk |= 1 << ring->qid; 3273 3274 return 0; 3275 } 3276 3277 static int 3278 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3279 const struct ieee80211_bpf_params *params) 3280 { 3281 struct ieee80211com *ic = ni->ni_ic; 3282 struct ifnet *ifp = ic->ic_ifp; 3283 struct iwn_softc *sc = ifp->if_softc; 3284 struct iwn_tx_ring *txq; 3285 int error = 0; 3286 3287 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3288 ieee80211_free_node(ni); 3289 m_freem(m); 3290 return ENETDOWN; 3291 } 3292 3293 IWN_LOCK(sc); 3294 if (params == NULL) 3295 txq = &sc->txq[M_WME_GETAC(m)]; 3296 else 3297 txq = &sc->txq[params->ibp_pri & 3]; 3298 3299 if (params == NULL) { 3300 /* 3301 * Legacy path; interpret frame contents to decide 3302 * precisely how to send the frame. 3303 */ 3304 error = iwn_tx_data(sc, m, ni, txq); 3305 } else { 3306 /* 3307 * Caller supplied explicit parameters to use in 3308 * sending the frame. 
3309 */ 3310 error = iwn_tx_data_raw(sc, m, ni, txq, params); 3311 } 3312 if (error != 0) { 3313 /* NB: m is reclaimed on tx failure */ 3314 ieee80211_free_node(ni); 3315 ifp->if_oerrors++; 3316 } 3317 IWN_UNLOCK(sc); 3318 return error; 3319 } 3320 3321 static void 3322 iwn_start(struct ifnet *ifp) 3323 { 3324 struct iwn_softc *sc = ifp->if_softc; 3325 3326 IWN_LOCK(sc); 3327 iwn_start_locked(ifp); 3328 IWN_UNLOCK(sc); 3329 } 3330 3331 static void 3332 iwn_start_locked(struct ifnet *ifp) 3333 { 3334 struct iwn_softc *sc = ifp->if_softc; 3335 struct ieee80211_node *ni; 3336 struct iwn_tx_ring *txq; 3337 struct mbuf *m; 3338 int pri; 3339 3340 IWN_LOCK_ASSERT(sc); 3341 3342 for (;;) { 3343 if (sc->qfullmsk != 0) { 3344 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3345 break; 3346 } 3347 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 3348 if (m == NULL) 3349 break; 3350 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3351 pri = M_WME_GETAC(m); 3352 txq = &sc->txq[pri]; 3353 if (iwn_tx_data(sc, m, ni, txq) != 0) { 3354 ifp->if_oerrors++; 3355 ieee80211_free_node(ni); 3356 break; 3357 } 3358 sc->sc_tx_timer = 5; 3359 } 3360 } 3361 3362 static void 3363 iwn_watchdog(struct iwn_softc *sc) 3364 { 3365 if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { 3366 struct ifnet *ifp = sc->sc_ifp; 3367 struct ieee80211com *ic = ifp->if_l2com; 3368 3369 if_printf(ifp, "device timeout\n"); 3370 ieee80211_runtask(ic, &sc->sc_reinit_task); 3371 } 3372 } 3373 3374 static int 3375 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3376 { 3377 struct iwn_softc *sc = ifp->if_softc; 3378 struct ieee80211com *ic = ifp->if_l2com; 3379 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3380 struct ifreq *ifr = (struct ifreq *) data; 3381 int error = 0, startall = 0, stop = 0; 3382 3383 switch (cmd) { 3384 case SIOCSIFFLAGS: 3385 IWN_LOCK(sc); 3386 if (ifp->if_flags & IFF_UP) { 3387 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3388 iwn_init_locked(sc); 3389 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 3390 startall = 1; 3391 else 3392 stop = 1; 3393 } 3394 } else { 3395 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3396 iwn_stop_locked(sc); 3397 } 3398 IWN_UNLOCK(sc); 3399 if (startall) 3400 ieee80211_start_all(ic); 3401 else if (vap != NULL && stop) 3402 ieee80211_stop(vap); 3403 break; 3404 case SIOCGIFMEDIA: 3405 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3406 break; 3407 case SIOCGIFADDR: 3408 error = ether_ioctl(ifp, cmd, data); 3409 break; 3410 default: 3411 error = EINVAL; 3412 break; 3413 } 3414 return error; 3415 } 3416 3417 /* 3418 * Send a command to the firmware. 3419 */ 3420 static int 3421 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3422 { 3423 struct iwn_tx_ring *ring = &sc->txq[4]; 3424 struct iwn_tx_desc *desc; 3425 struct iwn_tx_data *data; 3426 struct iwn_tx_cmd *cmd; 3427 struct mbuf *m; 3428 bus_addr_t paddr; 3429 int totlen, error; 3430 3431 IWN_LOCK_ASSERT(sc); 3432 3433 desc = &ring->desc[ring->cur]; 3434 data = &ring->data[ring->cur]; 3435 totlen = 4 + size; 3436 3437 if (size > sizeof cmd->data) { 3438 /* Command is too large to fit in a descriptor. 
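 * Small commands are copied into the pre-mapped slot in ring->cmd[] and
 * use data->cmd_paddr directly.  Larger commands, up to MCLBYTES
 * including the 4-byte header, are staged in a transient jumbo-cluster
 * mbuf that is DMA-loaded here and freed in iwn_cmd_done() once the
 * firmware acknowledges the command.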
*/ 3439 if (totlen > MCLBYTES) 3440 return EINVAL; 3441 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3442 if (m == NULL) 3443 return ENOMEM; 3444 cmd = mtod(m, struct iwn_tx_cmd *); 3445 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3446 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3447 if (error != 0) { 3448 m_freem(m); 3449 return error; 3450 } 3451 data->m = m; 3452 } else { 3453 cmd = &ring->cmd[ring->cur]; 3454 paddr = data->cmd_paddr; 3455 } 3456 3457 cmd->code = code; 3458 cmd->flags = 0; 3459 cmd->qid = ring->qid; 3460 cmd->idx = ring->cur; 3461 memcpy(cmd->data, buf, size); 3462 3463 desc->nsegs = 1; 3464 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3465 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3466 3467 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 3468 __func__, iwn_intr_str(cmd->code), cmd->code, 3469 cmd->flags, cmd->qid, cmd->idx); 3470 3471 if (size > sizeof cmd->data) { 3472 bus_dmamap_sync(ring->data_dmat, data->map, 3473 BUS_DMASYNC_PREWRITE); 3474 } else { 3475 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3476 BUS_DMASYNC_PREWRITE); 3477 } 3478 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3479 BUS_DMASYNC_PREWRITE); 3480 3481 #ifdef notyet 3482 /* Update TX scheduler. */ 3483 sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0); 3484 #endif 3485 3486 /* Kick command ring. */ 3487 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3488 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3489 3490 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 3491 } 3492 3493 static int 3494 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3495 { 3496 struct iwn4965_node_info hnode; 3497 caddr_t src, dst; 3498 3499 /* 3500 * We use the node structure for 5000 Series internally (it is 3501 * a superset of the one for 4965AGN). We thus copy the common 3502 * fields before sending the command. 3503 */ 3504 src = (caddr_t)node; 3505 dst = (caddr_t)&hnode; 3506 memcpy(dst, src, 48); 3507 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3508 memcpy(dst + 48, src + 72, 20); 3509 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3510 } 3511 3512 static int 3513 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3514 { 3515 /* Direct mapping. */ 3516 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3517 } 3518 3519 #if 0 /* HT */ 3520 static const uint8_t iwn_ridx_to_plcp[] = { 3521 10, 20, 55, 110, /* CCK */ 3522 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */ 3523 }; 3524 static const uint8_t iwn_siso_mcs_to_plcp[] = { 3525 0, 0, 0, 0, /* CCK */ 3526 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */ 3527 }; 3528 static const uint8_t iwn_mimo_mcs_to_plcp[] = { 3529 0, 0, 0, 0, /* CCK */ 3530 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */ 3531 }; 3532 #endif 3533 static const uint8_t iwn_prev_ridx[] = { 3534 /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */ 3535 0, 0, 1, 5, /* CCK */ 3536 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */ 3537 }; 3538 3539 /* 3540 * Configure hardware link parameters for the specified 3541 * node operating on the specified channel. 
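 * The retry[] table handed to the firmware encodes the fallback schedule:
 * entry 0 holds the initial rate (OFDM54 for the BSS node, otherwise the
 * lowest rate of the current band) and each later entry is the next step
 * down as given by iwn_prev_ridx[], which also allows crossing between
 * OFDM and CCK (OFDM6 falls back to CCK5, CCK11 to OFDM9).  Every entry
 * carries the selected TX antenna in its rflags.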
3542 */ 3543 static int 3544 iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async) 3545 { 3546 struct ifnet *ifp = sc->sc_ifp; 3547 struct ieee80211com *ic = ifp->if_l2com; 3548 struct iwn_cmd_link_quality linkq; 3549 const struct iwn_rate *rinfo; 3550 int i; 3551 uint8_t txant, ridx; 3552 3553 /* Use the first valid TX antenna. */ 3554 txant = IWN_LSB(sc->txchainmask); 3555 3556 memset(&linkq, 0, sizeof linkq); 3557 linkq.id = id; 3558 linkq.antmsk_1stream = txant; 3559 linkq.antmsk_2stream = IWN_ANT_AB; 3560 linkq.ampdu_max = 31; 3561 linkq.ampdu_threshold = 3; 3562 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3563 3564 #if 0 /* HT */ 3565 if (IEEE80211_IS_CHAN_HT(c)) 3566 linkq.mimo = 1; 3567 #endif 3568 3569 if (id == IWN_ID_BSS) 3570 ridx = IWN_RIDX_OFDM54; 3571 else if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) 3572 ridx = IWN_RIDX_OFDM6; 3573 else 3574 ridx = IWN_RIDX_CCK1; 3575 3576 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3577 rinfo = &iwn_rates[ridx]; 3578 #if 0 /* HT */ 3579 if (IEEE80211_IS_CHAN_HT40(c)) { 3580 linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx] 3581 | IWN_RIDX_MCS; 3582 linkq.retry[i].rflags = IWN_RFLAG_HT 3583 | IWN_RFLAG_HT40; 3584 /* XXX shortGI */ 3585 } else if (IEEE80211_IS_CHAN_HT(c)) { 3586 linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx] 3587 | IWN_RIDX_MCS; 3588 linkq.retry[i].rflags = IWN_RFLAG_HT; 3589 /* XXX shortGI */ 3590 } else 3591 #endif 3592 { 3593 linkq.retry[i].plcp = rinfo->plcp; 3594 linkq.retry[i].rflags = rinfo->flags; 3595 } 3596 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3597 ridx = iwn_prev_ridx[ridx]; 3598 } 3599 #ifdef IWN_DEBUG 3600 if (sc->sc_debug & IWN_DEBUG_STATE) { 3601 printf("%s: set link quality for node %d, mimo %d ssmask %d\n", 3602 __func__, id, linkq.mimo, linkq.antmsk_1stream); 3603 printf("%s:", __func__); 3604 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) 3605 printf(" %d:%x", linkq.retry[i].plcp, 3606 linkq.retry[i].rflags); 3607 printf("\n"); 3608 } 3609 #endif 3610 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3611 } 3612 3613 /* 3614 * Broadcast node is used to send group-addressed and management frames. 
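 * iwn_tx_data() addresses every group-addressed or non-data frame to
 * hal->broadcast_id, so this node must be installed (and given a link
 * quality table, done just below) before any such traffic can be sent.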
3615 */ 3616 static int 3617 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3618 { 3619 const struct iwn_hal *hal = sc->sc_hal; 3620 struct ifnet *ifp = sc->sc_ifp; 3621 struct iwn_node_info node; 3622 int error; 3623 3624 memset(&node, 0, sizeof node); 3625 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3626 node.id = hal->broadcast_id; 3627 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); 3628 error = hal->add_node(sc, &node, async); 3629 if (error != 0) 3630 return error; 3631 3632 error = iwn_set_link_quality(sc, hal->broadcast_id, async); 3633 return error; 3634 } 3635 3636 static int 3637 iwn_wme_update(struct ieee80211com *ic) 3638 { 3639 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3640 #define IWN_TXOP_TO_US(v) (v<<5) 3641 struct iwn_softc *sc = ic->ic_ifp->if_softc; 3642 struct iwn_edca_params cmd; 3643 int i; 3644 3645 memset(&cmd, 0, sizeof cmd); 3646 cmd.flags = htole32(IWN_EDCA_UPDATE); 3647 for (i = 0; i < WME_NUM_AC; i++) { 3648 const struct wmeParams *wmep = 3649 &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; 3650 cmd.ac[i].aifsn = wmep->wmep_aifsn; 3651 cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin)); 3652 cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax)); 3653 cmd.ac[i].txoplimit = 3654 htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit)); 3655 } 3656 IEEE80211_UNLOCK(ic); 3657 IWN_LOCK(sc); 3658 (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/); 3659 IWN_UNLOCK(sc); 3660 IEEE80211_LOCK(ic); 3661 return 0; 3662 #undef IWN_TXOP_TO_US 3663 #undef IWN_EXP2 3664 } 3665 3666 static void 3667 iwn_update_mcast(struct ifnet *ifp) 3668 { 3669 /* Ignore */ 3670 } 3671 3672 static void 3673 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3674 { 3675 struct iwn_cmd_led led; 3676 3677 /* Clear microcode LED ownership. */ 3678 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3679 3680 led.which = which; 3681 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3682 led.off = off; 3683 led.on = on; 3684 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3685 } 3686 3687 /* 3688 * Set the critical temperature at which the firmware will stop the radio 3689 * and notify us. 3690 */ 3691 static int 3692 iwn_set_critical_temp(struct iwn_softc *sc) 3693 { 3694 struct iwn_critical_temp crit; 3695 int32_t temp; 3696 3697 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3698 3699 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3700 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3701 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3702 temp = IWN_CTOK(110); 3703 else 3704 temp = 110; 3705 memset(&crit, 0, sizeof crit); 3706 crit.tempR = htole32(temp); 3707 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", 3708 temp); 3709 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3710 } 3711 3712 static int 3713 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3714 { 3715 struct iwn_cmd_timing cmd; 3716 uint64_t val, mod; 3717 3718 memset(&cmd, 0, sizeof cmd); 3719 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3720 cmd.bintval = htole16(ni->ni_intval); 3721 cmd.lintval = htole16(10); 3722 3723 /* Compute remaining time until next beacon. 
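 * The beacon interval is expressed in TU (1 TU = 1024 usec), so
 * ni_intval * 1024 is the interval in microseconds.  Worked example with
 * made-up numbers: ni_intval = 100 gives val = 102400 us; if the TSF in
 * cmd.tstamp reads 1000000 us, then mod = 1000000 % 102400 = 78400 and
 * binitval = 102400 - 78400 = 24000 us until the next expected beacon.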
*/ 3724 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3725 mod = le64toh(cmd.tstamp) % val; 3726 cmd.binitval = htole32((uint32_t)(val - mod)); 3727 3728 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3729 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3730 3731 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3732 } 3733 3734 static void 3735 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3736 { 3737 struct ifnet *ifp = sc->sc_ifp; 3738 struct ieee80211com *ic = ifp->if_l2com; 3739 3740 /* Adjust TX power if need be (delta >= 3 degC.) */ 3741 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 3742 __func__, sc->temp, temp); 3743 if (abs(temp - sc->temp) >= 3) { 3744 /* Record temperature of last calibration. */ 3745 sc->temp = temp; 3746 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 3747 } 3748 } 3749 3750 /* 3751 * Set TX power for current channel (each rate has its own power settings). 3752 * This function takes into account the regulatory information from EEPROM, 3753 * the current temperature and the current voltage. 3754 */ 3755 static int 3756 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3757 int async) 3758 { 3759 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3760 #define fdivround(a, b, n) \ 3761 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3762 /* Linear interpolation. */ 3763 #define interpolate(x, x1, y1, x2, y2, n) \ 3764 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3765 3766 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3767 struct ifnet *ifp = sc->sc_ifp; 3768 struct ieee80211com *ic = ifp->if_l2com; 3769 struct iwn_ucode_info *uc = &sc->ucode_info; 3770 struct iwn4965_cmd_txpower cmd; 3771 struct iwn4965_eeprom_chan_samples *chans; 3772 int32_t vdiff, tdiff; 3773 int i, c, grp, maxpwr; 3774 const uint8_t *rf_gain, *dsp_gain; 3775 uint8_t chan; 3776 3777 /* Retrieve channel number. */ 3778 chan = ieee80211_chan2ieee(ic, ch); 3779 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", 3780 chan); 3781 3782 memset(&cmd, 0, sizeof cmd); 3783 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3784 cmd.chan = chan; 3785 3786 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3787 maxpwr = sc->maxpwr5GHz; 3788 rf_gain = iwn4965_rf_gain_5ghz; 3789 dsp_gain = iwn4965_dsp_gain_5ghz; 3790 } else { 3791 maxpwr = sc->maxpwr2GHz; 3792 rf_gain = iwn4965_rf_gain_2ghz; 3793 dsp_gain = iwn4965_dsp_gain_2ghz; 3794 } 3795 3796 /* Compute voltage compensation. */ 3797 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3798 if (vdiff > 0) 3799 vdiff *= 2; 3800 if (abs(vdiff) > 2) 3801 vdiff = 0; 3802 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3803 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3804 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 3805 3806 /* Get channel attenuation group. */ 3807 if (chan <= 20) /* 1-20 */ 3808 grp = 4; 3809 else if (chan <= 43) /* 34-43 */ 3810 grp = 0; 3811 else if (chan <= 70) /* 44-70 */ 3812 grp = 1; 3813 else if (chan <= 124) /* 71-124 */ 3814 grp = 2; 3815 else /* 125-200 */ 3816 grp = 3; 3817 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3818 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 3819 3820 /* Get channel sub-band. */ 3821 for (i = 0; i < IWN_NBANDS; i++) 3822 if (sc->bands[i].lo != 0 && 3823 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3824 break; 3825 if (i == IWN_NBANDS) /* Can't happen in real-life. 
*/ 3826 return EINVAL; 3827 chans = sc->bands[i].chans; 3828 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3829 "%s: chan %d sub-band=%d\n", __func__, chan, i); 3830 3831 for (c = 0; c < 2; c++) { 3832 uint8_t power, gain, temp; 3833 int maxchpwr, pwr, ridx, idx; 3834 3835 power = interpolate(chan, 3836 chans[0].num, chans[0].samples[c][1].power, 3837 chans[1].num, chans[1].samples[c][1].power, 1); 3838 gain = interpolate(chan, 3839 chans[0].num, chans[0].samples[c][1].gain, 3840 chans[1].num, chans[1].samples[c][1].gain, 1); 3841 temp = interpolate(chan, 3842 chans[0].num, chans[0].samples[c][1].temp, 3843 chans[1].num, chans[1].samples[c][1].temp, 1); 3844 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3845 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 3846 __func__, c, power, gain, temp); 3847 3848 /* Compute temperature compensation. */ 3849 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3850 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3851 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 3852 __func__, tdiff, sc->temp, temp); 3853 3854 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3855 /* Convert dBm to half-dBm. */ 3856 maxchpwr = sc->maxpwr[chan] * 2; 3857 if ((ridx / 8) & 1) 3858 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3859 3860 pwr = maxpwr; 3861 3862 /* Adjust TX power based on rate. */ 3863 if ((ridx % 8) == 5) 3864 pwr -= 15; /* OFDM48: -7.5dB */ 3865 else if ((ridx % 8) == 6) 3866 pwr -= 17; /* OFDM54: -8.5dB */ 3867 else if ((ridx % 8) == 7) 3868 pwr -= 20; /* OFDM60: -10dB */ 3869 else 3870 pwr -= 10; /* Others: -5dB */ 3871 3872 /* Do not exceed channel max TX power. */ 3873 if (pwr > maxchpwr) 3874 pwr = maxchpwr; 3875 3876 idx = gain - (pwr - power) - tdiff - vdiff; 3877 if ((ridx / 8) & 1) /* MIMO */ 3878 idx += (int32_t)le32toh(uc->atten[grp][c]); 3879 3880 if (cmd.band == 0) 3881 idx += 9; /* 5GHz */ 3882 if (ridx == IWN_RIDX_MAX) 3883 idx += 5; /* CCK */ 3884 3885 /* Make sure idx stays in a valid range. */ 3886 if (idx < 0) 3887 idx = 0; 3888 else if (idx > IWN4965_MAX_PWR_INDEX) 3889 idx = IWN4965_MAX_PWR_INDEX; 3890 3891 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3892 "%s: Tx chain %d, rate idx %d: power=%d\n", 3893 __func__, c, ridx, idx); 3894 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3895 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3896 } 3897 } 3898 3899 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3900 "%s: set tx power for chan %d\n", __func__, chan); 3901 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3902 3903 #undef interpolate 3904 #undef fdivround 3905 } 3906 3907 static int 3908 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3909 int async) 3910 { 3911 struct iwn5000_cmd_txpower cmd; 3912 3913 /* 3914 * TX power calibration is handled automatically by the firmware 3915 * for 5000 Series. 3916 */ 3917 memset(&cmd, 0, sizeof cmd); 3918 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3919 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3920 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3921 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); 3922 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 3923 } 3924 3925 /* 3926 * Retrieve the maximum RSSI (in dBm) among receivers. 
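 * The hardware reports per-antenna RSSI relative to the current AGC gain,
 * so the value returned below is max(rssi) - agc - IWN_RSSI_TO_DBM.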
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc = (le16toh(phy->agc) >> 7) & 0x7f;

	rssi = 0;
#if 0
	if (mask & IWN_ANT_A)	/* Ant A */
		rssi = max(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)	/* Ant B */
		rssi = max(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)	/* Ant C */
		rssi = max(rssi, phy->rssi[4]);
#else
	rssi = max(rssi, phy->rssi[0]);
	rssi = max(rssi, phy->rssi[2]);
	rssi = max(rssi, phy->rssi[4]);
#endif

	DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d "
	    "result %d\n", __func__, agc, mask,
	    phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	int rssi;
	uint8_t agc;

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
	    le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d "
	    "result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen.) */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = (r4 << 8) >> 8;
	/* Compute temperature in Kelvin. */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.  We export it to
	 * users through the sensor framework though.
	 */
	temp = le32toh(sc->rawtemp);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
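 * Most correlation thresholds start at the per-chip minima from sc->limits
 * (CCK x4 uses a fixed 125) and are later adjusted by iwn_tune_sensitivity()
 * from the false-alarm counts reported in the firmware statistics.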
4046 */ 4047 static int 4048 iwn_init_sensitivity(struct iwn_softc *sc) 4049 { 4050 const struct iwn_hal *hal = sc->sc_hal; 4051 struct iwn_calib_state *calib = &sc->calib; 4052 uint32_t flags; 4053 int error; 4054 4055 /* Reset calibration state machine. */ 4056 memset(calib, 0, sizeof (*calib)); 4057 calib->state = IWN_CALIB_STATE_INIT; 4058 calib->cck_state = IWN_CCK_STATE_HIFA; 4059 /* Set initial correlation values. */ 4060 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 4061 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 4062 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 4063 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 4064 calib->cck_x4 = 125; 4065 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 4066 calib->energy_cck = sc->limits->energy_cck; 4067 4068 /* Write initial sensitivity. */ 4069 error = iwn_send_sensitivity(sc); 4070 if (error != 0) 4071 return error; 4072 4073 /* Write initial gains. */ 4074 error = hal->init_gains(sc); 4075 if (error != 0) 4076 return error; 4077 4078 /* Request statistics at each beacon interval. */ 4079 flags = 0; 4080 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__); 4081 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 4082 } 4083 4084 /* 4085 * Collect noise and RSSI statistics for the first 20 beacons received 4086 * after association and use them to determine connected antennas and 4087 * to set differential gains. 4088 */ 4089 static void 4090 iwn_collect_noise(struct iwn_softc *sc, 4091 const struct iwn_rx_general_stats *stats) 4092 { 4093 const struct iwn_hal *hal = sc->sc_hal; 4094 struct iwn_calib_state *calib = &sc->calib; 4095 uint32_t val; 4096 int i; 4097 4098 /* Accumulate RSSI and noise for all 3 antennas. */ 4099 for (i = 0; i < 3; i++) { 4100 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 4101 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 4102 } 4103 /* NB: We update differential gains only once after 20 beacons. */ 4104 if (++calib->nbeacons < 20) 4105 return; 4106 4107 /* Determine highest average RSSI. */ 4108 val = MAX(calib->rssi[0], calib->rssi[1]); 4109 val = MAX(calib->rssi[2], val); 4110 4111 /* Determine which antennas are connected. */ 4112 sc->chainmask = sc->rxchainmask; 4113 for (i = 0; i < 3; i++) 4114 if (val - calib->rssi[i] > 15 * 20) 4115 sc->chainmask &= ~(1 << i); 4116 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4117 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", 4118 __func__, sc->rxchainmask, sc->chainmask); 4119 4120 /* If none of the TX antennas are connected, keep at least one. */ 4121 if ((sc->chainmask & sc->txchainmask) == 0) 4122 sc->chainmask |= IWN_LSB(sc->txchainmask); 4123 4124 (void)hal->set_gains(sc); 4125 calib->state = IWN_CALIB_STATE_RUN; 4126 4127 #ifdef notyet 4128 /* XXX Disable RX chains with no antennas connected. */ 4129 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 4130 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4131 #endif 4132 4133 #if 0 4134 /* XXX: not yet */ 4135 /* Enable power-saving mode if requested by user. */ 4136 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 4137 (void)iwn_set_pslevel(sc, 0, 3, 1); 4138 #endif 4139 } 4140 4141 static int 4142 iwn4965_init_gains(struct iwn_softc *sc) 4143 { 4144 struct iwn_phy_calib_gain cmd; 4145 4146 memset(&cmd, 0, sizeof cmd); 4147 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4148 /* Differential gains initially set to 0 for all 3 antennas. 
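	 * The memset above leaves cmd.gain[] zeroed, so only the calibration
	 * code needs to be filled in here.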
*/ 4149 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4150 "%s: setting initial differential gains\n", __func__); 4151 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4152 } 4153 4154 static int 4155 iwn5000_init_gains(struct iwn_softc *sc) 4156 { 4157 struct iwn_phy_calib cmd; 4158 4159 memset(&cmd, 0, sizeof cmd); 4160 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 4161 cmd.ngroups = 1; 4162 cmd.isvalid = 1; 4163 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4164 "%s: setting initial differential gains\n", __func__); 4165 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4166 } 4167 4168 static int 4169 iwn4965_set_gains(struct iwn_softc *sc) 4170 { 4171 struct iwn_calib_state *calib = &sc->calib; 4172 struct iwn_phy_calib_gain cmd; 4173 int i, delta, noise; 4174 4175 /* Get minimal noise among connected antennas. */ 4176 noise = INT_MAX; /* NB: There's at least one antenna. */ 4177 for (i = 0; i < 3; i++) 4178 if (sc->chainmask & (1 << i)) 4179 noise = MIN(calib->noise[i], noise); 4180 4181 memset(&cmd, 0, sizeof cmd); 4182 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4183 /* Set differential gains for connected antennas. */ 4184 for (i = 0; i < 3; i++) { 4185 if (sc->chainmask & (1 << i)) { 4186 /* Compute attenuation (in unit of 1.5dB). */ 4187 delta = (noise - (int32_t)calib->noise[i]) / 30; 4188 /* NB: delta <= 0 */ 4189 /* Limit to [-4.5dB,0]. */ 4190 cmd.gain[i] = MIN(abs(delta), 3); 4191 if (delta < 0) 4192 cmd.gain[i] |= 1 << 2; /* sign bit */ 4193 } 4194 } 4195 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4196 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4197 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); 4198 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4199 } 4200 4201 static int 4202 iwn5000_set_gains(struct iwn_softc *sc) 4203 { 4204 struct iwn_calib_state *calib = &sc->calib; 4205 struct iwn_phy_calib_gain cmd; 4206 int i, ant, delta, div; 4207 4208 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4209 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4210 4211 memset(&cmd, 0, sizeof cmd); 4212 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN; 4213 cmd.ngroups = 1; 4214 cmd.isvalid = 1; 4215 /* Get first available RX antenna as referential. */ 4216 ant = IWN_LSB(sc->rxchainmask); 4217 /* Set differential gains for other antennas. */ 4218 for (i = ant + 1; i < 3; i++) { 4219 if (sc->chainmask & (1 << i)) { 4220 /* The delta is relative to antenna "ant". */ 4221 delta = ((int32_t)calib->noise[ant] - 4222 (int32_t)calib->noise[i]) / div; 4223 /* Limit to [-4.5dB,+4.5dB]. */ 4224 cmd.gain[i - 1] = MIN(abs(delta), 3); 4225 if (delta < 0) 4226 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4227 } 4228 } 4229 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4230 "setting differential gains Ant B/C: %x/%x (%x)\n", 4231 cmd.gain[0], cmd.gain[1], sc->chainmask); 4232 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4233 } 4234 4235 /* 4236 * Tune RF RX sensitivity based on the number of false alarms detected 4237 * during the last beacon period. 
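 * Sensitivity is decreased (thresholds raised) when the false-alarm rate
 * exceeds 50 per unit of RX-enable time and increased again when it drops
 * below 5; see the inc()/dec() macros below.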
4238 */ 4239 static void 4240 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4241 { 4242 #define inc(val, inc, max) \ 4243 if ((val) < (max)) { \ 4244 if ((val) < (max) - (inc)) \ 4245 (val) += (inc); \ 4246 else \ 4247 (val) = (max); \ 4248 needs_update = 1; \ 4249 } 4250 #define dec(val, dec, min) \ 4251 if ((val) > (min)) { \ 4252 if ((val) > (min) + (dec)) \ 4253 (val) -= (dec); \ 4254 else \ 4255 (val) = (min); \ 4256 needs_update = 1; \ 4257 } 4258 4259 const struct iwn_sensitivity_limits *limits = sc->limits; 4260 struct iwn_calib_state *calib = &sc->calib; 4261 uint32_t val, rxena, fa; 4262 uint32_t energy[3], energy_min; 4263 uint8_t noise[3], noise_ref; 4264 int i, needs_update = 0; 4265 4266 /* Check that we've been enabled long enough. */ 4267 rxena = le32toh(stats->general.load); 4268 if (rxena == 0) 4269 return; 4270 4271 /* Compute number of false alarms since last call for OFDM. */ 4272 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4273 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 4274 fa *= 200 * 1024; /* 200TU */ 4275 4276 /* Save counters values for next call. */ 4277 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 4278 calib->fa_ofdm = le32toh(stats->ofdm.fa); 4279 4280 if (fa > 50 * rxena) { 4281 /* High false alarm count, decrease sensitivity. */ 4282 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4283 "%s: OFDM high false alarm count: %u\n", __func__, fa); 4284 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4285 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4286 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4287 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4288 4289 } else if (fa < 5 * rxena) { 4290 /* Low false alarm count, increase sensitivity. */ 4291 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4292 "%s: OFDM low false alarm count: %u\n", __func__, fa); 4293 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4294 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4295 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4296 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4297 } 4298 4299 /* Compute maximum noise among 3 receivers. */ 4300 for (i = 0; i < 3; i++) 4301 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 4302 val = MAX(noise[0], noise[1]); 4303 val = MAX(noise[2], val); 4304 /* Insert it into our samples table. */ 4305 calib->noise_samples[calib->cur_noise_sample] = val; 4306 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4307 4308 /* Compute maximum noise among last 20 samples. */ 4309 noise_ref = calib->noise_samples[0]; 4310 for (i = 1; i < 20; i++) 4311 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4312 4313 /* Compute maximum energy among 3 receivers. */ 4314 for (i = 0; i < 3; i++) 4315 energy[i] = le32toh(stats->general.energy[i]); 4316 val = MIN(energy[0], energy[1]); 4317 val = MIN(energy[2], val); 4318 /* Insert it into our samples table. */ 4319 calib->energy_samples[calib->cur_energy_sample] = val; 4320 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4321 4322 /* Compute minimum energy among last 10 samples. */ 4323 energy_min = calib->energy_samples[0]; 4324 for (i = 1; i < 10; i++) 4325 energy_min = MAX(energy_min, calib->energy_samples[i]); 4326 energy_min += 6; 4327 4328 /* Compute number of false alarms since last call for CCK. */ 4329 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4330 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4331 fa *= 200 * 1024; /* 200TU */ 4332 4333 /* Save counters values for next call. 
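	 * The firmware counters are cumulative; each call works on the delta
	 * against the values recorded here (and for OFDM further above).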
*/ 4334 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4335 calib->fa_cck = le32toh(stats->cck.fa); 4336 4337 if (fa > 50 * rxena) { 4338 /* High false alarm count, decrease sensitivity. */ 4339 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4340 "%s: CCK high false alarm count: %u\n", __func__, fa); 4341 calib->cck_state = IWN_CCK_STATE_HIFA; 4342 calib->low_fa = 0; 4343 4344 if (calib->cck_x4 > 160) { 4345 calib->noise_ref = noise_ref; 4346 if (calib->energy_cck > 2) 4347 dec(calib->energy_cck, 2, energy_min); 4348 } 4349 if (calib->cck_x4 < 160) { 4350 calib->cck_x4 = 161; 4351 needs_update = 1; 4352 } else 4353 inc(calib->cck_x4, 3, limits->max_cck_x4); 4354 4355 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4356 4357 } else if (fa < 5 * rxena) { 4358 /* Low false alarm count, increase sensitivity. */ 4359 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4360 "%s: CCK low false alarm count: %u\n", __func__, fa); 4361 calib->cck_state = IWN_CCK_STATE_LOFA; 4362 calib->low_fa++; 4363 4364 if (calib->cck_state != IWN_CCK_STATE_INIT && 4365 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4366 calib->low_fa > 100)) { 4367 inc(calib->energy_cck, 2, limits->min_energy_cck); 4368 dec(calib->cck_x4, 3, limits->min_cck_x4); 4369 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4370 } 4371 } else { 4372 /* Not worth to increase or decrease sensitivity. */ 4373 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4374 "%s: CCK normal false alarm count: %u\n", __func__, fa); 4375 calib->low_fa = 0; 4376 calib->noise_ref = noise_ref; 4377 4378 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4379 /* Previous interval had many false alarms. */ 4380 dec(calib->energy_cck, 8, energy_min); 4381 } 4382 calib->cck_state = IWN_CCK_STATE_INIT; 4383 } 4384 4385 if (needs_update) 4386 (void)iwn_send_sensitivity(sc); 4387 #undef dec 4388 #undef inc 4389 } 4390 4391 static int 4392 iwn_send_sensitivity(struct iwn_softc *sc) 4393 { 4394 struct iwn_calib_state *calib = &sc->calib; 4395 struct iwn_sensitivity_cmd cmd; 4396 4397 memset(&cmd, 0, sizeof cmd); 4398 cmd.which = IWN_SENSITIVITY_WORKTBL; 4399 /* OFDM modulation. */ 4400 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4401 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4402 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4403 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4404 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4405 cmd.energy_ofdm_th = htole16(62); 4406 /* CCK modulation. */ 4407 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4408 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4409 cmd.energy_cck = htole16(calib->energy_cck); 4410 /* Barker modulation: use default values. */ 4411 cmd.corr_barker = htole16(190); 4412 cmd.corr_barker_mrc = htole16(390); 4413 4414 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4415 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 4416 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4417 calib->ofdm_mrc_x4, calib->cck_x4, 4418 calib->cck_mrc_x4, calib->energy_cck); 4419 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1); 4420 } 4421 4422 /* 4423 * Set STA mode power saving level (between 0 and 5). 4424 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4425 */ 4426 static int 4427 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4428 { 4429 const struct iwn_pmgt *pmgt; 4430 struct iwn_pmgt_cmd cmd; 4431 uint32_t max, skip_dtim; 4432 uint32_t tmp; 4433 int i; 4434 4435 /* Select which PS parameters to use. 
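	 * One parameter table exists per DTIM range (<= 2, <= 10, > 10),
	 * indexed by the requested power-saving level.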
*/ 4436 if (dtim <= 2) 4437 pmgt = &iwn_pmgt[0][level]; 4438 else if (dtim <= 10) 4439 pmgt = &iwn_pmgt[1][level]; 4440 else 4441 pmgt = &iwn_pmgt[2][level]; 4442 4443 memset(&cmd, 0, sizeof cmd); 4444 if (level != 0) /* not CAM */ 4445 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4446 if (level == 5) 4447 cmd.flags |= htole16(IWN_PS_FAST_PD); 4448 /* Retrieve PCIe Active State Power Management (ASPM). */ 4449 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 4450 if (!(tmp & 0x1)) /* L0s Entry disabled. */ 4451 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4452 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4453 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4454 4455 if (dtim == 0) { 4456 dtim = 1; 4457 skip_dtim = 0; 4458 } else 4459 skip_dtim = pmgt->skip_dtim; 4460 if (skip_dtim != 0) { 4461 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4462 max = pmgt->intval[4]; 4463 if (max == (uint32_t)-1) 4464 max = dtim * (skip_dtim + 1); 4465 else if (max > dtim) 4466 max = (max / dtim) * dtim; 4467 } else 4468 max = dtim; 4469 for (i = 0; i < 5; i++) 4470 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 4471 4472 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 4473 level); 4474 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4475 } 4476 4477 static int 4478 iwn_config(struct iwn_softc *sc) 4479 { 4480 const struct iwn_hal *hal = sc->sc_hal; 4481 struct ifnet *ifp = sc->sc_ifp; 4482 struct ieee80211com *ic = ifp->if_l2com; 4483 struct iwn_bluetooth bluetooth; 4484 uint32_t txmask; 4485 int error; 4486 uint16_t rxchain; 4487 4488 /* Configure valid TX chains for 5000 Series. */ 4489 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4490 txmask = htole32(sc->txchainmask); 4491 DPRINTF(sc, IWN_DEBUG_RESET, 4492 "%s: configuring valid TX chains 0x%x\n", __func__, txmask); 4493 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4494 sizeof txmask, 0); 4495 if (error != 0) { 4496 device_printf(sc->sc_dev, 4497 "%s: could not configure valid TX chains, " 4498 "error %d\n", __func__, error); 4499 return error; 4500 } 4501 } 4502 4503 /* Configure bluetooth coexistence. */ 4504 memset(&bluetooth, 0, sizeof bluetooth); 4505 bluetooth.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 4506 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF; 4507 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF; 4508 DPRINTF(sc, IWN_DEBUG_RESET, "%s: config bluetooth coexistence\n", 4509 __func__); 4510 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4511 if (error != 0) { 4512 device_printf(sc->sc_dev, 4513 "%s: could not configure bluetooth coexistence, error %d\n", 4514 __func__, error); 4515 return error; 4516 } 4517 4518 /* Set mode, channel, RX filter and enable RX. */ 4519 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4520 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp)); 4521 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp)); 4522 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 4523 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4524 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 4525 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4526 switch (ic->ic_opmode) { 4527 case IEEE80211_M_STA: 4528 sc->rxon.mode = IWN_MODE_STA; 4529 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4530 break; 4531 case IEEE80211_M_MONITOR: 4532 sc->rxon.mode = IWN_MODE_MONITOR; 4533 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4534 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4535 break; 4536 default: 4537 /* Should not get there. 
		 */
		break;
	}
	sc->rxon.cck_mask = 0x0f;	/* not yet negotiated */
	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon.ht_single_mask = 0xff;
	sc->rxon.ht_dual_mask = 0xff;
	sc->rxon.ht_triple_mask = 0xff;
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(2) |
	    IWN_RXCHAIN_IDLE_COUNT(2);
	sc->rxon.rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: RXON command failed\n", __func__);
		return error;
	}

	error = iwn_add_broadcast_node(sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add broadcast node\n", __func__);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	error = hal->set_txpower(sc, ic->ic_curchan, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power\n", __func__);
		return error;
	}

	error = iwn_set_critical_temp(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set critical temperature\n", __func__);
		return error;
	}

	/* Set power saving level to CAM during initialization. */
	error = iwn_set_pslevel(sc, 0, 0, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}
	return 0;
}

static int
iwn_scan(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
	struct iwn_scan_hdr *hdr;
	struct iwn_cmd_data *tx;
	struct iwn_scan_essid *essid;
	struct iwn_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	struct ieee80211_channel *c;
	int buflen, error, nrates;
	uint16_t rxchain;
	uint8_t *buf, *frm, txant;

	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf == NULL) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate buffer for scan command\n",
		    __func__);
		return ENOMEM;
	}
	hdr = (struct iwn_scan_hdr *)buf;

	/*
	 * Move to the next channel if no frames are received within 10ms
	 * after sending the probe request.
	 */
	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
	hdr->quiet_threshold = htole16(1);	/* min # of packets */

	/* Select antennas for scanning. */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
	    IWN_RXCHAIN_DRIVER_FORCE;
	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Ant A must be avoided in 5GHz because of an HW bug. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
	} else	/* Use all available RX antennas. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
	hdr->rxchain = htole16(rxchain);
	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);

	tx = (struct iwn_cmd_data *)(hdr + 1);
	tx->flags = htole32(IWN_TX_AUTO_SEQ);
	tx->id = sc->sc_hal->broadcast_id;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
		/* Send probe requests at 6Mbps.
		 */
		tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
	} else {
		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
		/* Send probe requests at 1Mbps. */
		tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
		tx->rflags = IWN_RFLAG_CCK;
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	}
	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);
	tx->rflags |= IWN_RFLAG_ANT(txant);

	essid = (struct iwn_scan_essid *)(tx + 1);
	if (ss->ss_ssid[0].len != 0) {
		essid[0].id = IEEE80211_ELEMID_SSID;
		essid[0].len = ss->ss_ssid[0].len;
		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
	}

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh = (struct ieee80211_frame *)(essid + 20);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);

	/* Add SSID IE. */
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = ss->ss_ssid[0].len;
	memcpy(frm, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
	frm += ss->ss_ssid[0].len;

	/* Add supported rates IE. */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->rs_nrates;
	if (nrates > IEEE80211_RATE_SIZE)
		nrates = IEEE80211_RATE_SIZE;
	*frm++ = nrates;
	memcpy(frm, rs->rs_rates, nrates);
	frm += nrates;

	/* Add supported xrates IE. */
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/* Set length of probe request.
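	 * This length covers the 802.11 header plus the SSID, supported rates
	 * and extended rates elements built above; the channel descriptor is
	 * appended separately after the frame.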
*/ 4705 tx->len = htole16(frm - (uint8_t *)wh); 4706 4707 c = ic->ic_curchan; 4708 chan = (struct iwn_scan_chan *)frm; 4709 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4710 chan->flags = 0; 4711 if (ss->ss_nssid > 0) 4712 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4713 chan->dsp_gain = 0x6e; 4714 if (IEEE80211_IS_CHAN_5GHZ(c) && 4715 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 4716 chan->rf_gain = 0x3b; 4717 chan->active = htole16(24); 4718 chan->passive = htole16(110); 4719 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4720 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 4721 chan->rf_gain = 0x3b; 4722 chan->active = htole16(24); 4723 if (sc->rxon.associd) 4724 chan->passive = htole16(78); 4725 else 4726 chan->passive = htole16(110); 4727 hdr->crc_threshold = 0xffff; 4728 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 4729 chan->rf_gain = 0x28; 4730 chan->active = htole16(36); 4731 chan->passive = htole16(120); 4732 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4733 } else { 4734 chan->rf_gain = 0x28; 4735 chan->active = htole16(36); 4736 if (sc->rxon.associd) 4737 chan->passive = htole16(88); 4738 else 4739 chan->passive = htole16(120); 4740 hdr->crc_threshold = 0xffff; 4741 } 4742 4743 DPRINTF(sc, IWN_DEBUG_STATE, 4744 "%s: chan %u flags 0x%x rf_gain 0x%x " 4745 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__, 4746 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 4747 chan->active, chan->passive); 4748 4749 hdr->nchan++; 4750 chan++; 4751 buflen = (uint8_t *)chan - buf; 4752 hdr->len = htole16(buflen); 4753 4754 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 4755 hdr->nchan); 4756 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4757 free(buf, M_DEVBUF); 4758 return error; 4759 } 4760 4761 static int 4762 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 4763 { 4764 const struct iwn_hal *hal = sc->sc_hal; 4765 struct ifnet *ifp = sc->sc_ifp; 4766 struct ieee80211com *ic = ifp->if_l2com; 4767 struct ieee80211_node *ni = vap->iv_bss; 4768 int error; 4769 4770 sc->calib.state = IWN_CALIB_STATE_INIT; 4771 4772 /* Update adapter configuration. 
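	 * Sending a new RXON invalidates the firmware station table, which is
	 * why the broadcast node is added again at the end of this function.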
*/ 4773 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4774 sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); 4775 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4776 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4777 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4778 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4779 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4780 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4781 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4782 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4783 sc->rxon.cck_mask = 0; 4784 sc->rxon.ofdm_mask = 0x15; 4785 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4786 sc->rxon.cck_mask = 0x03; 4787 sc->rxon.ofdm_mask = 0; 4788 } else { 4789 /* XXX assume 802.11b/g */ 4790 sc->rxon.cck_mask = 0x0f; 4791 sc->rxon.ofdm_mask = 0x15; 4792 } 4793 DPRINTF(sc, IWN_DEBUG_STATE, 4794 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4795 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4796 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4797 __func__, 4798 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4799 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4800 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4801 le16toh(sc->rxon.rxchain), 4802 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4803 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4804 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4805 if (error != 0) { 4806 device_printf(sc->sc_dev, 4807 "%s: RXON command failed, error %d\n", __func__, error); 4808 return error; 4809 } 4810 4811 /* Configuration has changed, set TX power accordingly. */ 4812 error = hal->set_txpower(sc, ni->ni_chan, 1); 4813 if (error != 0) { 4814 device_printf(sc->sc_dev, 4815 "%s: could not set Tx power, error %d\n", __func__, error); 4816 return error; 4817 } 4818 /* 4819 * Reconfiguring RXON clears the firmware nodes table so we must 4820 * add the broadcast node again. 4821 */ 4822 error = iwn_add_broadcast_node(sc, 1); 4823 if (error != 0) { 4824 device_printf(sc->sc_dev, 4825 "%s: could not add broadcast node, error %d\n", 4826 __func__, error); 4827 return error; 4828 } 4829 return 0; 4830 } 4831 4832 /* 4833 * Configure the adapter for associated state. 4834 */ 4835 static int 4836 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 4837 { 4838 #define MS(v,x) (((v) & x) >> x##_S) 4839 const struct iwn_hal *hal = sc->sc_hal; 4840 struct ifnet *ifp = sc->sc_ifp; 4841 struct ieee80211com *ic = ifp->if_l2com; 4842 struct ieee80211_node *ni = vap->iv_bss; 4843 struct iwn_node_info node; 4844 int error; 4845 4846 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4847 /* Link LED blinks while monitoring. */ 4848 iwn_set_led(sc, IWN_LED_LINK, 20, 20); 4849 return 0; 4850 } 4851 error = iwn_set_timing(sc, ni); 4852 if (error != 0) { 4853 device_printf(sc->sc_dev, 4854 "%s: could not set timing, error %d\n", __func__, error); 4855 return error; 4856 } 4857 4858 /* Update adapter configuration. */ 4859 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4860 sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); 4861 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4862 /* Short preamble and slot time are negotiated when associating. 
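	 * Both flags are cleared first and then re-derived from ic_flags so
	 * that a reassociation picks up the currently negotiated values.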
*/ 4863 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4864 sc->rxon.flags |= htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4865 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4866 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4867 else 4868 sc->rxon.flags &= ~htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4869 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4870 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4871 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4872 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4873 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4874 sc->rxon.cck_mask = 0; 4875 sc->rxon.ofdm_mask = 0x15; 4876 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4877 sc->rxon.cck_mask = 0x03; 4878 sc->rxon.ofdm_mask = 0; 4879 } else { 4880 /* XXX assume 802.11b/g */ 4881 sc->rxon.cck_mask = 0x0f; 4882 sc->rxon.ofdm_mask = 0x15; 4883 } 4884 #if 0 /* HT */ 4885 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 4886 sc->rxon.flags &= ~htole32(IWN_RXON_HT); 4887 if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) 4888 sc->rxon.flags |= htole32(IWN_RXON_HT40U); 4889 else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 4890 sc->rxon.flags |= htole32(IWN_RXON_HT40D); 4891 else 4892 sc->rxon.flags |= htole32(IWN_RXON_HT20); 4893 sc->rxon.rxchain = htole16( 4894 IWN_RXCHAIN_VALID(3) 4895 | IWN_RXCHAIN_MIMO_COUNT(3) 4896 | IWN_RXCHAIN_IDLE_COUNT(1) 4897 | IWN_RXCHAIN_MIMO_FORCE); 4898 4899 maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); 4900 ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); 4901 } else 4902 maxrxampdu = ampdudensity = 0; 4903 #endif 4904 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4905 4906 DPRINTF(sc, IWN_DEBUG_STATE, 4907 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4908 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4909 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4910 __func__, 4911 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4912 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4913 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4914 le16toh(sc->rxon.rxchain), 4915 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4916 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4917 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4918 if (error != 0) { 4919 device_printf(sc->sc_dev, 4920 "%s: could not update configuration, error %d\n", 4921 __func__, error); 4922 return error; 4923 } 4924 4925 /* Configuration has changed, set TX power accordingly. */ 4926 error = hal->set_txpower(sc, ni->ni_chan, 1); 4927 if (error != 0) { 4928 device_printf(sc->sc_dev, 4929 "%s: could not set Tx power, error %d\n", __func__, error); 4930 return error; 4931 } 4932 4933 /* Add BSS node. 
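	 * The BSS entry uses the fixed station id IWN_ID_BSS; its rate retry
	 * table is set right after with iwn_set_link_quality().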
	 */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
#ifdef notyet
	node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
	    IWN_AMDPU_DENSITY(5));	/* 2us */
#endif
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n",
	    __func__, node.id, le32toh(node.htflags));
	error = hal->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not add BSS node\n");
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "setting link quality for node %d\n",
	    node.id);
	error = iwn_set_link_quality(sc, node.id, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup MRR for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	error = iwn_init_sensitivity(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n",
		    __func__, error);
		return error;
	}

	/* Start periodic calibration timer. */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	iwn_calib_reset(sc);

	/* Link LED always on while associated. */
	iwn_set_led(sc, IWN_LED_LINK, 0, 1);

	return 0;
#undef MS
}

#if 0 /* HT */
/*
 * This function is called by the upper layer when an ADDBA request is
 * received from another STA and before the ADDBA response is sent.
 */
static int
iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwn_softc *sc = ic->ic_softc;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;

	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_ADDBA;
	node.addba_tid = tid;
	node.addba_ssn = htole16(ba->ba_winstart);
	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
	    wn->id, tid, ba->ba_winstart);
	return sc->sc_hal->add_node(sc, &node, 1);
}

/*
 * This function is called by the upper layer on teardown of an HT-immediate
 * Block Ack agreement (e.g. upon receipt of a DELBA frame.)
 */
static void
iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwn_softc *sc = ic->ic_softc;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;

	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DELBA;
	node.delba_tid = tid;
	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
	(void)sc->sc_hal->add_node(sc, &node, 1);
}

/*
 * This function is called by the upper layer when an ADDBA response is
 * received from another STA.
 */
static int
iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
	struct iwn_softc *sc = ic->ic_softc;
	const struct iwn_hal *hal = sc->sc_hal;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	int error;

	/* Enable TX for the specified RA/TID.
*/ 5039 wn->disable_tid &= ~(1 << tid); 5040 memset(&node, 0, sizeof node); 5041 node.id = wn->id; 5042 node.control = IWN_NODE_UPDATE; 5043 node.flags = IWN_FLAG_SET_DISABLE_TID; 5044 node.disable_tid = htole16(wn->disable_tid); 5045 error = hal->add_node(sc, &node, 1); 5046 if (error != 0) 5047 return error; 5048 5049 if ((error = iwn_nic_lock(sc)) != 0) 5050 return error; 5051 hal->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5052 iwn_nic_unlock(sc); 5053 return 0; 5054 } 5055 5056 static void 5057 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5058 uint8_t tid) 5059 { 5060 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5061 struct iwn_softc *sc = ic->ic_softc; 5062 int error; 5063 5064 error = iwn_nic_lock(sc); 5065 if (error != 0) 5066 return; 5067 sc->sc_hal->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5068 iwn_nic_unlock(sc); 5069 } 5070 5071 static void 5072 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5073 uint8_t tid, uint16_t ssn) 5074 { 5075 struct iwn_node *wn = (void *)ni; 5076 int qid = 7 + tid; 5077 5078 /* Stop TX scheduler while we're changing its configuration. */ 5079 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5080 IWN4965_TXQ_STATUS_CHGACT); 5081 5082 /* Assign RA/TID translation to the queue. */ 5083 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5084 wn->id << 4 | tid); 5085 5086 /* Enable chain-building mode for the queue. */ 5087 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5088 5089 /* Set starting sequence number from the ADDBA request. */ 5090 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5091 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5092 5093 /* Set scheduler window size. */ 5094 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5095 IWN_SCHED_WINSZ); 5096 /* Set scheduler frame limit. */ 5097 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5098 IWN_SCHED_LIMIT << 16); 5099 5100 /* Enable interrupts for the queue. */ 5101 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5102 5103 /* Mark the queue as active. */ 5104 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5105 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5106 iwn_tid2fifo[tid] << 1); 5107 } 5108 5109 static void 5110 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5111 { 5112 int qid = 7 + tid; 5113 5114 /* Stop TX scheduler while we're changing its configuration. */ 5115 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5116 IWN4965_TXQ_STATUS_CHGACT); 5117 5118 /* Set starting sequence number from the ADDBA request. */ 5119 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5120 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5121 5122 /* Disable interrupts for the queue. */ 5123 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5124 5125 /* Mark the queue as inactive. */ 5126 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5127 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5128 } 5129 5130 static void 5131 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5132 uint8_t tid, uint16_t ssn) 5133 { 5134 struct iwn_node *wn = (void *)ni; 5135 int qid = 10 + tid; 5136 5137 /* Stop TX scheduler while we're changing its configuration. */ 5138 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5139 IWN5000_TXQ_STATUS_CHGACT); 5140 5141 /* Assign RA/TID translation to the queue. 
*/ 5142 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5143 wn->id << 4 | tid); 5144 5145 /* Enable chain-building mode for the queue. */ 5146 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5147 5148 /* Enable aggregation for the queue. */ 5149 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5150 5151 /* Set starting sequence number from the ADDBA request. */ 5152 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5153 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5154 5155 /* Set scheduler window size and frame limit. */ 5156 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5157 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5158 5159 /* Enable interrupts for the queue. */ 5160 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5161 5162 /* Mark the queue as active. */ 5163 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5164 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5165 } 5166 5167 static void 5168 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5169 { 5170 int qid = 10 + tid; 5171 5172 /* Stop TX scheduler while we're changing its configuration. */ 5173 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5174 IWN5000_TXQ_STATUS_CHGACT); 5175 5176 /* Disable aggregation for the queue. */ 5177 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5178 5179 /* Set starting sequence number from the ADDBA request. */ 5180 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5181 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5182 5183 /* Disable interrupts for the queue. */ 5184 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5185 5186 /* Mark the queue as inactive. */ 5187 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5188 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5189 } 5190 #endif 5191 5192 /* 5193 * Send calibration results to the runtime firmware. These results were 5194 * obtained on first boot from the initialization firmware, or by reading 5195 * the EEPROM for crystal calibration. 5196 */ 5197 static int 5198 iwn5000_send_calib_results(struct iwn_softc *sc) 5199 { 5200 struct iwn_calib_info *calib_result; 5201 int idx, error; 5202 5203 for (idx = 0; idx < IWN_CALIB_NUM; idx++) { 5204 calib_result = &sc->calib_results[idx]; 5205 5206 /* No support for this type of calibration. */ 5207 if ((sc->calib_init & (1 << idx)) == 0) 5208 continue; 5209 5210 /* No calibration result available. */ 5211 if (calib_result->buf == NULL) 5212 continue; 5213 5214 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5215 "%s: send calibration result idx=%d, len=%d\n", 5216 __func__, idx, calib_result->len); 5217 5218 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, calib_result->buf, 5219 calib_result->len, 0); 5220 if (error != 0) { 5221 device_printf(sc->sc_dev, 5222 "%s: could not send calibration result " 5223 "idx=%d, error=%d\n", 5224 __func__, idx, error); 5225 return error; 5226 } 5227 } 5228 return 0; 5229 } 5230 5231 /* 5232 * Save calibration result at the given index. The index determines 5233 * in which order the results are sent to the runtime firmware. 
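 * The buffered results are replayed by iwn5000_send_calib_results() every
 * time the runtime firmware is loaded.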
5234 */ 5235 static int 5236 iwn5000_save_calib_result(struct iwn_softc *sc, struct iwn_phy_calib *calib, 5237 int len, int idx) 5238 { 5239 struct iwn_calib_info *calib_result = &sc->calib_results[idx]; 5240 5241 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5242 "%s: saving calibration result code=%d, idx=%d, len=%d\n", 5243 __func__, calib->code, idx, len); 5244 5245 if (calib_result->buf != NULL) 5246 free(calib_result->buf, M_DEVBUF); 5247 5248 calib_result->buf = malloc(len, M_DEVBUF, M_NOWAIT); 5249 if (calib_result->buf == NULL) { 5250 device_printf(sc->sc_dev, 5251 "%s: not enough memory for calibration result " 5252 "code=%d, len=%d\n", __func__, calib->code, len); 5253 return ENOMEM; 5254 } 5255 5256 calib_result->len = len; 5257 memcpy(calib_result->buf, calib, len); 5258 return 0; 5259 } 5260 5261 static void 5262 iwn5000_free_calib_results(struct iwn_softc *sc) 5263 { 5264 struct iwn_calib_info *calib_result; 5265 int idx; 5266 5267 for (idx = 0; idx < IWN_CALIB_NUM; idx++) { 5268 calib_result = &sc->calib_results[idx]; 5269 5270 if (calib_result->buf != NULL) 5271 free(calib_result->buf, M_DEVBUF); 5272 5273 calib_result->buf = NULL; 5274 calib_result->len = 0; 5275 } 5276 } 5277 5278 /* 5279 * Obtain the crystal calibration result from the EEPROM. 5280 */ 5281 static int 5282 iwn5000_chrystal_calib(struct iwn_softc *sc) 5283 { 5284 struct iwn5000_phy_calib_crystal cmd; 5285 uint32_t base, crystal; 5286 uint16_t val; 5287 5288 /* Read crystal calibration. */ 5289 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 5290 base = le16toh(val); 5291 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &crystal, 5292 sizeof(uint32_t)); 5293 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: crystal calibration=0x%08x\n", 5294 __func__, le32toh(crystal)); 5295 5296 memset(&cmd, 0, sizeof cmd); 5297 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5298 cmd.ngroups = 1; 5299 cmd.isvalid = 1; 5300 cmd.cap_pin[0] = le32toh(crystal) & 0xff; 5301 cmd.cap_pin[1] = (le32toh(crystal) >> 16) & 0xff; 5302 5303 return iwn5000_save_calib_result(sc, (struct iwn_phy_calib *)&cmd, 5304 sizeof cmd, IWN_CALIB_IDX_XTAL); 5305 } 5306 5307 /* 5308 * Query calibration results from the initialization firmware. We do this 5309 * only once at first boot. 5310 */ 5311 static int 5312 iwn5000_send_calib_query(struct iwn_softc *sc, uint32_t cfg) 5313 { 5314 #define CALIB_INIT_CFG 0xffffffff; 5315 struct iwn5000_calib_config cmd; 5316 int error; 5317 5318 memset(&cmd, 0, sizeof cmd); 5319 cmd.ucode.once.enable = CALIB_INIT_CFG; 5320 if (cfg == 0) { 5321 cmd.ucode.once.start = CALIB_INIT_CFG; 5322 cmd.ucode.once.send = CALIB_INIT_CFG; 5323 cmd.ucode.flags = CALIB_INIT_CFG; 5324 } else 5325 cmd.ucode.once.start = cfg; 5326 5327 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5328 "%s: query calibration results, cfg %x\n", __func__, cfg); 5329 5330 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5331 if (error != 0) 5332 return error; 5333 5334 /* Wait at most two seconds for calibration to complete. */ 5335 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5336 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz); 5337 5338 return error; 5339 #undef CALIB_INIT_CFG 5340 } 5341 5342 /* 5343 * Process a CALIBRATION_RESULT notification sent by the initialization 5344 * firmware on response to a CMD_CALIB_CONFIG command. 
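 * The payload is stored with iwn5000_save_calib_result() at an index that
 * fixes the order in which it is later replayed to the runtime firmware.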
5345 */ 5346 static int 5347 iwn5000_rx_calib_result(struct iwn_softc *sc, struct iwn_rx_desc *desc, 5348 struct iwn_rx_data *data) 5349 { 5350 #define FRAME_SIZE_MASK 0x3fff 5351 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 5352 int len, idx; 5353 5354 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 5355 len = (le32toh(desc->len) & FRAME_SIZE_MASK); 5356 5357 /* Remove length field itself. */ 5358 len -= 4; 5359 5360 /* 5361 * Determine the order in which the results will be send to the 5362 * runtime firmware. 5363 */ 5364 switch (calib->code) { 5365 case IWN5000_PHY_CALIB_DC: 5366 idx = IWN_CALIB_IDX_DC; 5367 break; 5368 case IWN5000_PHY_CALIB_LO: 5369 idx = IWN_CALIB_IDX_LO; 5370 break; 5371 case IWN5000_PHY_CALIB_TX_IQ: 5372 idx = IWN_CALIB_IDX_TX_IQ; 5373 break; 5374 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 5375 idx = IWN_CALIB_IDX_TX_IQ_PERIODIC; 5376 break; 5377 case IWN5000_PHY_CALIB_BASE_BAND: 5378 idx = IWN_CALIB_IDX_BASE_BAND; 5379 break; 5380 default: 5381 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5382 "%s: unknown calibration code=%d\n", __func__, calib->code); 5383 return EINVAL; 5384 } 5385 return iwn5000_save_calib_result(sc, calib, len, idx); 5386 #undef FRAME_SIZE_MASK 5387 } 5388 5389 static int 5390 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5391 { 5392 struct iwn5000_wimax_coex wimax; 5393 5394 #ifdef notyet 5395 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5396 /* Enable WiMAX coexistence for combo adapters. */ 5397 wimax.flags = 5398 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5399 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5400 IWN_WIMAX_COEX_STA_TABLE_VALID | 5401 IWN_WIMAX_COEX_ENABLE; 5402 memcpy(wimax.events, iwn6050_wimax_events, 5403 sizeof iwn6050_wimax_events); 5404 } else 5405 #endif 5406 { 5407 /* Disable WiMAX coexistence. */ 5408 wimax.flags = 0; 5409 memset(wimax.events, 0, sizeof wimax.events); 5410 } 5411 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 5412 __func__); 5413 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5414 } 5415 5416 /* 5417 * This function is called after the runtime firmware notifies us of its 5418 * readiness (called in a process context.) 5419 */ 5420 static int 5421 iwn4965_post_alive(struct iwn_softc *sc) 5422 { 5423 int error, qid; 5424 5425 if ((error = iwn_nic_lock(sc)) != 0) 5426 return error; 5427 5428 /* Clear TX scheduler state in SRAM. */ 5429 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5430 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5431 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5432 5433 /* Set physical address of TX scheduler rings (1KB aligned.) */ 5434 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5435 5436 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5437 5438 /* Disable chain mode for all our 16 queues. */ 5439 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5440 5441 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5442 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5443 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5444 5445 /* Set scheduler window size. */ 5446 iwn_mem_write(sc, sc->sched_base + 5447 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5448 /* Set scheduler frame limit. */ 5449 iwn_mem_write(sc, sc->sched_base + 5450 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5451 IWN_SCHED_LIMIT << 16); 5452 } 5453 5454 /* Enable interrupts for all our 16 queues. */ 5455 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5456 /* Identify TX FIFO rings (0-7). 
*/ 5457 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5458 5459 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5460 for (qid = 0; qid < 7; qid++) { 5461 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5462 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5463 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5464 } 5465 iwn_nic_unlock(sc); 5466 return 0; 5467 } 5468 5469 /* 5470 * This function is called after the initialization or runtime firmware 5471 * notifies us of its readiness (called in a process context.) 5472 */ 5473 static int 5474 iwn5000_post_alive(struct iwn_softc *sc) 5475 { 5476 int error, qid; 5477 5478 /* Switch to using ICT interrupt mode. */ 5479 iwn5000_ict_reset(sc); 5480 5481 error = iwn_nic_lock(sc); 5482 if (error != 0) 5483 return error; 5484 5485 /* Clear TX scheduler state in SRAM. */ 5486 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5487 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5488 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5489 5490 /* Set physical address of TX scheduler rings (1KB aligned.) */ 5491 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5492 5493 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5494 5495 /* Enable chain mode for all queues, except command queue. */ 5496 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5497 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5498 5499 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5500 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5501 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5502 5503 iwn_mem_write(sc, sc->sched_base + 5504 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5505 /* Set scheduler window size and frame limit. */ 5506 iwn_mem_write(sc, sc->sched_base + 5507 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5508 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5509 } 5510 5511 /* Enable interrupts for all our 20 queues. */ 5512 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5513 /* Identify TX FIFO rings (0-7). */ 5514 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5515 5516 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5517 for (qid = 0; qid < 7; qid++) { 5518 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5519 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5520 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5521 } 5522 iwn_nic_unlock(sc); 5523 5524 /* Configure WiMAX coexistence for combo adapters. */ 5525 error = iwn5000_send_wimax_coex(sc); 5526 if (error != 0) { 5527 device_printf(sc->sc_dev, 5528 "%s: could not configure WiMAX coexistence, error %d\n", 5529 __func__, error); 5530 return error; 5531 } 5532 5533 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5534 /* 5535 * Start calibration by setting and sending the crystal 5536 * calibration first; this must be done before we are able 5537 * to query the other calibration results. 5538 */ 5539 error = iwn5000_chrystal_calib(sc); 5540 if (error != 0) { 5541 device_printf(sc->sc_dev, 5542 "%s: could not set crystal calibration, " 5543 "error=%d\n", __func__, error); 5544 return error; 5545 } 5546 error = iwn5000_send_calib_results(sc); 5547 if (error != 0) { 5548 device_printf(sc->sc_dev, 5549 "%s: could not send calibration results, " 5550 "error=%d\n", __func__, error); 5551 return error; 5552 } 5553 5554 /* 5555 * Query other calibration results from the initialization 5556 * firmware.
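 * A cfg of 0 is treated by iwn5000_send_calib_query() as "request
 * everything": it sets the enable/start/send/flags words to 0xffffffff
 * and then sleeps for up to 2 * hz, on the assumption that another
 * context sets IWN_FLAG_CALIB_DONE and wakes us once the firmware has
 * reported all of its results.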
5557 */ 5558 error = iwn5000_send_calib_query(sc, 0); 5559 if (error != 0) { 5560 device_printf(sc->sc_dev, 5561 "%s: could not query calibration, error=%d\n", 5562 __func__, error); 5563 return error; 5564 } 5565 5566 /* 5567 * We have the calibration results now, reboot with the 5568 * runtime firmware (call ourselves recursively!) 5569 */ 5570 iwn_hw_stop(sc); 5571 error = iwn_hw_init(sc); 5572 } else { 5573 /* 5574 * Send calibration results obtained from the initialization 5575 * firmware to the runtime firmware. 5576 */ 5577 error = iwn5000_send_calib_results(sc); 5578 5579 /* 5580 * Tell the runtime firmware to do certain calibration types. 5581 */ 5582 if (sc->calib_runtime != 0) { 5583 error = iwn5000_send_calib_query(sc, sc->calib_runtime); 5584 if (error != 0) { 5585 device_printf(sc->sc_dev, 5586 "%s: could not send query calibration, " 5587 "error=%d, cfg=%x\n", __func__, error, 5588 sc->calib_runtime); 5589 } 5590 } 5591 } 5592 return error; 5593 } 5594 5595 /* 5596 * The firmware boot code is small and is intended to be copied directly into 5597 * the NIC internal memory (no DMA transfer.) 5598 */ 5599 static int 5600 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5601 { 5602 int error, ntries; 5603 5604 size /= sizeof (uint32_t); 5605 5606 error = iwn_nic_lock(sc); 5607 if (error != 0) 5608 return error; 5609 5610 /* Copy microcode image into NIC memory. */ 5611 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5612 (const uint32_t *)ucode, size); 5613 5614 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5615 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5616 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5617 5618 /* Start boot load now. */ 5619 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5620 5621 /* Wait for transfer to complete. */ 5622 for (ntries = 0; ntries < 1000; ntries++) { 5623 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5624 IWN_BSM_WR_CTRL_START)) 5625 break; 5626 DELAY(10); 5627 } 5628 if (ntries == 1000) { 5629 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 5630 __func__); 5631 iwn_nic_unlock(sc); 5632 return ETIMEDOUT; 5633 } 5634 5635 /* Enable boot after power up. */ 5636 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5637 5638 iwn_nic_unlock(sc); 5639 return 0; 5640 } 5641 5642 static int 5643 iwn4965_load_firmware(struct iwn_softc *sc) 5644 { 5645 struct iwn_fw_info *fw = &sc->fw; 5646 struct iwn_dma_info *dma = &sc->fw_dma; 5647 int error; 5648 5649 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5650 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5651 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5652 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5653 fw->init.text, fw->init.textsz); 5654 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5655 5656 /* Tell adapter where to find initialization sections. */ 5657 error = iwn_nic_lock(sc); 5658 if (error != 0) 5659 return error; 5660 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5661 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5662 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5663 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5664 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5665 iwn_nic_unlock(sc); 5666 5667 /* Load firmware boot code. 
*/ 5668 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5669 if (error != 0) { 5670 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 5671 __func__); 5672 return error; 5673 } 5674 /* Now press "execute". */ 5675 IWN_WRITE(sc, IWN_RESET, 0); 5676 5677 /* Wait at most one second for first alive notification. */ 5678 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 5679 if (error) { 5680 device_printf(sc->sc_dev, 5681 "%s: timeout waiting for adapter to initialize, error %d\n", 5682 __func__, error); 5683 return error; 5684 } 5685 5686 /* Retrieve current temperature for initial TX power calibration. */ 5687 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5688 sc->temp = iwn4965_get_temperature(sc); 5689 5690 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5691 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5692 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5693 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5694 fw->main.text, fw->main.textsz); 5695 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5696 5697 /* Tell adapter where to find runtime sections. */ 5698 error = iwn_nic_lock(sc); 5699 if (error != 0) 5700 return error; 5701 5702 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5703 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5704 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5705 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5706 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5707 IWN_FW_UPDATED | fw->main.textsz); 5708 iwn_nic_unlock(sc); 5709 5710 return 0; 5711 } 5712 5713 static int 5714 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5715 const uint8_t *section, int size) 5716 { 5717 struct iwn_dma_info *dma = &sc->fw_dma; 5718 int error; 5719 5720 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5721 memcpy(dma->vaddr, section, size); 5722 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5723 5724 error = iwn_nic_lock(sc); 5725 if (error != 0) 5726 return error; 5727 5728 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5729 IWN_FH_TX_CONFIG_DMA_PAUSE); 5730 5731 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5732 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5733 IWN_LOADDR(dma->paddr)); 5734 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5735 IWN_HIADDR(dma->paddr) << 28 | size); 5736 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5737 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5738 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5739 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5740 5741 /* Kick Flow Handler to start DMA transfer. */ 5742 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5743 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5744 5745 iwn_nic_unlock(sc); 5746 5747 /* Wait at most one second for FH DMA transfer to complete. */ 5748 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 5749 } 5750 5751 static int 5752 iwn5000_load_firmware(struct iwn_softc *sc) 5753 { 5754 struct iwn_fw_part *fw; 5755 int error; 5756 5757 /* Load the initialization firmware on first boot only. */ 5758 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5759 &sc->fw.main : &sc->fw.init; 5760 5761 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5762 fw->text, fw->textsz); 5763 if (error != 0) { 5764 device_printf(sc->sc_dev, 5765 "%s: could not load firmware %s section, error %d\n", 5766 __func__, ".text", error); 5767 return error; 5768 } 5769 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5770 fw->data, fw->datasz); 5771 if (error != 0) { 5772 device_printf(sc->sc_dev, 5773 "%s: could not load firmware %s section, error %d\n", 5774 __func__, ".data", error); 5775 return error; 5776 } 5777 5778 /* Now press "execute". */ 5779 IWN_WRITE(sc, IWN_RESET, 0); 5780 return 0; 5781 } 5782 5783 /* 5784 * Extract text and data sections from a legacy firmware image. 5785 */ 5786 static int 5787 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5788 { 5789 const uint32_t *ptr; 5790 size_t hdrlen = 24; 5791 uint32_t rev; 5792 5793 ptr = (const uint32_t *)sc->fw_fp->data; 5794 rev = le32toh(*ptr++); 5795 5796 /* Check firmware API version. */ 5797 if (IWN_FW_API(rev) <= 1) { 5798 device_printf(sc->sc_dev, 5799 "%s: bad firmware, need API version >=2\n", __func__); 5800 return EINVAL; 5801 } 5802 if (IWN_FW_API(rev) >= 3) { 5803 /* Skip build number (version 2 header). */ 5804 hdrlen += 4; 5805 ptr++; 5806 } 5807 if (fw->size < hdrlen) { 5808 device_printf(sc->sc_dev, 5809 "%s: firmware file too short: %zu bytes\n", 5810 __func__, fw->size); 5811 return EINVAL; 5812 } 5813 fw->main.textsz = le32toh(*ptr++); 5814 fw->main.datasz = le32toh(*ptr++); 5815 fw->init.textsz = le32toh(*ptr++); 5816 fw->init.datasz = le32toh(*ptr++); 5817 fw->boot.textsz = le32toh(*ptr++); 5818 5819 /* Check that all firmware sections fit. */ 5820 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5821 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5822 device_printf(sc->sc_dev, 5823 "%s: firmware file too short: %zu bytes\n", 5824 __func__, fw->size); 5825 return EINVAL; 5826 } 5827 5828 /* Get pointers to firmware sections. */ 5829 fw->main.text = (const uint8_t *)ptr; 5830 fw->main.data = fw->main.text + fw->main.textsz; 5831 fw->init.text = fw->main.data + fw->main.datasz; 5832 fw->init.data = fw->init.text + fw->init.textsz; 5833 fw->boot.text = fw->init.data + fw->init.datasz; 5834 5835 return 0; 5836 } 5837 5838 /* 5839 * Extract text and data sections from a TLV firmware image. 5840 */ 5841 int 5842 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5843 uint16_t alt) 5844 { 5845 const struct iwn_fw_tlv_hdr *hdr; 5846 const struct iwn_fw_tlv *tlv; 5847 const uint8_t *ptr, *end; 5848 uint64_t altmask; 5849 uint32_t len; 5850 5851 if (fw->size < sizeof (*hdr)) { 5852 device_printf(sc->sc_dev, 5853 "%s: firmware file too short: %zu bytes\n", 5854 __func__, fw->size); 5855 return EINVAL; 5856 } 5857 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5858 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5859 device_printf(sc->sc_dev, 5860 "%s: bad firmware file signature 0x%08x\n", 5861 __func__, le32toh(hdr->signature)); 5862 return EINVAL; 5863 } 5864 5865 /* 5866 * Select the closest supported alternative that is less than 5867 * or equal to the specified one. 5868 */ 5869 altmask = le64toh(hdr->altmask); 5870 while (alt > 0 && !(altmask & (1ULL << alt))) 5871 alt--; /* Downgrade. */ 5872 5873 ptr = (const uint8_t *)(hdr + 1); 5874 end = (const uint8_t *)(fw->data + fw->size); 5875 5876 /* Parse type-length-value fields. 
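 * A rough sketch of the assumed image layout, inferred from the
 * accesses below rather than from a firmware specification:
 *
 *	struct iwn_fw_tlv_hdr { signature, ..., altmask }  (checked above)
 *	followed by a sequence of records, each roughly
 *	struct iwn_fw_tlv { type (le16), alt (le16), len (le32) } plus
 *	len bytes of payload, padded so that the next record starts on a
 *	4-byte boundary ((len + 3) & ~3).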
*/ 5877 while (ptr + sizeof (*tlv) <= end) { 5878 tlv = (const struct iwn_fw_tlv *)ptr; 5879 len = le32toh(tlv->len); 5880 5881 ptr += sizeof (*tlv); 5882 if (ptr + len > end) { 5883 device_printf(sc->sc_dev, 5884 "%s: firmware file too short: %zu bytes\n", 5885 __func__, fw->size); 5886 return EINVAL; 5887 } 5888 /* Skip other alternatives. */ 5889 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5890 goto next; 5891 5892 switch (le16toh(tlv->type)) { 5893 case IWN_FW_TLV_MAIN_TEXT: 5894 fw->main.text = ptr; 5895 fw->main.textsz = len; 5896 break; 5897 case IWN_FW_TLV_MAIN_DATA: 5898 fw->main.data = ptr; 5899 fw->main.datasz = len; 5900 break; 5901 case IWN_FW_TLV_INIT_TEXT: 5902 fw->init.text = ptr; 5903 fw->init.textsz = len; 5904 break; 5905 case IWN_FW_TLV_INIT_DATA: 5906 fw->init.data = ptr; 5907 fw->init.datasz = len; 5908 break; 5909 case IWN_FW_TLV_BOOT_TEXT: 5910 fw->boot.text = ptr; 5911 fw->boot.textsz = len; 5912 break; 5913 default: 5914 DPRINTF(sc, IWN_DEBUG_RESET, 5915 "%s: TLV type %d not handled\n", 5916 __func__, le16toh(tlv->type)); 5917 break; 5918 } 5919 next: /* TLV fields are 32-bit aligned. */ 5920 ptr += (len + 3) & ~3; 5921 } 5922 return 0; 5923 } 5924 5925 static int 5926 iwn_read_firmware(struct iwn_softc *sc) 5927 { 5928 const struct iwn_hal *hal = sc->sc_hal; 5929 struct iwn_fw_info *fw = &sc->fw; 5930 int error; 5931 5932 IWN_UNLOCK(sc); 5933 5934 memset(fw, 0, sizeof (*fw)); 5935 5936 /* Read firmware image from filesystem. */ 5937 sc->fw_fp = firmware_get(sc->fwname); 5938 if (sc->fw_fp == NULL) { 5939 device_printf(sc->sc_dev, 5940 "%s: could not load firmware image \"%s\"\n", __func__, 5941 sc->fwname); 5942 IWN_LOCK(sc); 5943 return EINVAL; 5944 } 5945 IWN_LOCK(sc); 5946 5947 fw->size = sc->fw_fp->datasize; 5948 fw->data = (const uint8_t *)sc->fw_fp->data; 5949 if (fw->size < sizeof (uint32_t)) { 5950 device_printf(sc->sc_dev, 5951 "%s: firmware file too short: %zu bytes\n", 5952 __func__, fw->size); 5953 return EINVAL; 5954 } 5955 5956 /* Retrieve text and data sections. */ 5957 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5958 error = iwn_read_firmware_leg(sc, fw); 5959 else 5960 error = iwn_read_firmware_tlv(sc, fw, 1); 5961 if (error != 0) { 5962 device_printf(sc->sc_dev, 5963 "%s: could not read firmware sections\n", __func__); 5964 return error; 5965 } 5966 5967 /* Make sure text and data sections fit in hardware memory. */ 5968 if (fw->main.textsz > hal->fw_text_maxsz || 5969 fw->main.datasz > hal->fw_data_maxsz || 5970 fw->init.textsz > hal->fw_text_maxsz || 5971 fw->init.datasz > hal->fw_data_maxsz || 5972 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5973 (fw->boot.textsz & 3) != 0) { 5974 device_printf(sc->sc_dev, 5975 "%s: firmware sections too large\n", __func__); 5976 return EINVAL; 5977 } 5978 5979 /* We can proceed with loading the firmware. */ 5980 return 0; 5981 } 5982 5983 static int 5984 iwn_clock_wait(struct iwn_softc *sc) 5985 { 5986 int ntries; 5987 5988 /* Set "initialization complete" bit. */ 5989 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5990 5991 /* Wait for clock stabilization.
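 * The loop below polls the MAC clock-ready bit every 10us for up to
 * 2500 iterations, i.e. roughly 25ms worst case, before giving up with
 * ETIMEDOUT.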
*/ 5992 for (ntries = 0; ntries < 2500; ntries++) { 5993 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5994 return 0; 5995 DELAY(10); 5996 } 5997 device_printf(sc->sc_dev, 5998 "%s: timeout waiting for clock stabilization\n", __func__); 5999 return ETIMEDOUT; 6000 } 6001 6002 static int 6003 iwn_apm_init(struct iwn_softc *sc) 6004 { 6005 uint32_t tmp; 6006 int error; 6007 6008 /* Disable L0s exit timer (NMI bug workaround.) */ 6009 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6010 /* Don't wait for ICH L0s (ICH bug workaround.) */ 6011 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6012 6013 /* Set FH wait threshold to max (HW bug under stress workaround.) */ 6014 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6015 6016 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6017 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6018 6019 /* Retrieve PCIe Active State Power Management (ASPM). */ 6020 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 6021 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6022 if (tmp & 0x02) /* L1 Entry enabled. */ 6023 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6024 else 6025 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6026 6027 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6028 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6029 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6030 6031 /* Wait for clock stabilization before accessing prph. */ 6032 error = iwn_clock_wait(sc); 6033 if (error != 0) 6034 return error; 6035 6036 error = iwn_nic_lock(sc); 6037 if (error != 0) 6038 return error; 6039 6040 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6041 /* Enable DMA and BSM (Bootstrap State Machine.) */ 6042 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6043 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6044 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6045 } else { 6046 /* Enable DMA. */ 6047 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6048 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6049 } 6050 DELAY(20); 6051 6052 /* Disable L1-Active. */ 6053 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6054 iwn_nic_unlock(sc); 6055 6056 return 0; 6057 } 6058 6059 static void 6060 iwn_apm_stop_master(struct iwn_softc *sc) 6061 { 6062 int ntries; 6063 6064 /* Stop busmaster DMA activity. */ 6065 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6066 for (ntries = 0; ntries < 100; ntries++) { 6067 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6068 return; 6069 DELAY(10); 6070 } 6071 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 6072 __func__); 6073 } 6074 6075 static void 6076 iwn_apm_stop(struct iwn_softc *sc) 6077 { 6078 iwn_apm_stop_master(sc); 6079 6080 /* Reset the entire device. */ 6081 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6082 DELAY(10); 6083 /* Clear "initialization complete" bit. */ 6084 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6085 } 6086 6087 static int 6088 iwn4965_nic_config(struct iwn_softc *sc) 6089 { 6090 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6091 /* 6092 * I don't believe this to be correct but this is what the 6093 * vendor driver is doing. Probably the bits should not be 6094 * shifted in IWN_RFCFG_*. 
6095 */ 6096 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6097 IWN_RFCFG_TYPE(sc->rfcfg) | 6098 IWN_RFCFG_STEP(sc->rfcfg) | 6099 IWN_RFCFG_DASH(sc->rfcfg)); 6100 } 6101 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6102 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6103 return 0; 6104 } 6105 6106 static int 6107 iwn5000_nic_config(struct iwn_softc *sc) 6108 { 6109 uint32_t tmp; 6110 int error; 6111 6112 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6113 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6114 IWN_RFCFG_TYPE(sc->rfcfg) | 6115 IWN_RFCFG_STEP(sc->rfcfg) | 6116 IWN_RFCFG_DASH(sc->rfcfg)); 6117 } 6118 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6119 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6120 6121 error = iwn_nic_lock(sc); 6122 if (error != 0) 6123 return error; 6124 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6125 6126 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6127 /* 6128 * Select first Switching Voltage Regulator (1.32V) to 6129 * solve a stability issue related to noisy DC2DC line 6130 * in the silicon of 1000 Series. 6131 */ 6132 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6133 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6134 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6135 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6136 } 6137 iwn_nic_unlock(sc); 6138 6139 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6140 /* Use internal power amplifier only. */ 6141 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6142 } 6143 if (sc->hw_type == IWN_HW_REV_TYPE_6050 && sc->calib_ver >= 6) { 6144 /* Indicate that ROM calibration version is >=6. */ 6145 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6146 } 6147 return 0; 6148 } 6149 6150 /* 6151 * Take NIC ownership over Intel Active Management Technology (AMT). 6152 */ 6153 static int 6154 iwn_hw_prepare(struct iwn_softc *sc) 6155 { 6156 int ntries; 6157 6158 /* Check if hardware is ready. */ 6159 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6160 for (ntries = 0; ntries < 5; ntries++) { 6161 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6162 IWN_HW_IF_CONFIG_NIC_READY) 6163 return 0; 6164 DELAY(10); 6165 } 6166 6167 /* Hardware not ready, force into ready state. */ 6168 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6169 for (ntries = 0; ntries < 15000; ntries++) { 6170 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6171 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6172 break; 6173 DELAY(10); 6174 } 6175 if (ntries == 15000) 6176 return ETIMEDOUT; 6177 6178 /* Hardware should be ready now. */ 6179 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6180 for (ntries = 0; ntries < 5; ntries++) { 6181 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6182 IWN_HW_IF_CONFIG_NIC_READY) 6183 return 0; 6184 DELAY(10); 6185 } 6186 return ETIMEDOUT; 6187 } 6188 6189 static int 6190 iwn_hw_init(struct iwn_softc *sc) 6191 { 6192 const struct iwn_hal *hal = sc->sc_hal; 6193 int error, chnl, qid; 6194 6195 /* Clear pending interrupts. */ 6196 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6197 6198 error = iwn_apm_init(sc); 6199 if (error != 0) { 6200 device_printf(sc->sc_dev, 6201 "%s: could not power ON adapter, error %d\n", 6202 __func__, error); 6203 return error; 6204 } 6205 6206 /* Select VMAIN power source. */ 6207 error = iwn_nic_lock(sc); 6208 if (error != 0) 6209 return error; 6210 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 6211 iwn_nic_unlock(sc); 6212 6213 /* Perform adapter-specific initialization. */ 6214 error = hal->nic_config(sc); 6215 if (error != 0) 6216 return error; 6217 6218 /* Initialize RX ring. 
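 * Note the alignment constraints implied by the shifts below: the RX
 * descriptor ring base is programmed as paddr >> 8 (256-byte aligned)
 * and the status area as paddr >> 4 (16-byte aligned), while the write
 * pointer set a few lines further down is kept a multiple of 8 via
 * (IWN_RX_RING_COUNT - 1) & ~7.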
*/ 6219 error = iwn_nic_lock(sc); 6220 if (error != 0) 6221 return error; 6222 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 6223 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 6224 /* Set physical address of RX ring (256-byte aligned.) */ 6225 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 6226 /* Set physical address of RX status (16-byte aligned.) */ 6227 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 6228 /* Enable RX. */ 6229 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 6230 IWN_FH_RX_CONFIG_ENA | 6231 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 6232 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 6233 IWN_FH_RX_CONFIG_SINGLE_FRAME | 6234 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 6235 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 6236 iwn_nic_unlock(sc); 6237 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 6238 6239 error = iwn_nic_lock(sc); 6240 if (error != 0) 6241 return error; 6242 6243 /* Initialize TX scheduler. */ 6244 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 6245 6246 /* Set physical address of "keep warm" page (16-byte aligned.) */ 6247 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 6248 6249 /* Initialize TX rings. */ 6250 for (qid = 0; qid < hal->ntxqs; qid++) { 6251 struct iwn_tx_ring *txq = &sc->txq[qid]; 6252 6253 /* Set physical address of TX ring (256-byte aligned.) */ 6254 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 6255 txq->desc_dma.paddr >> 8); 6256 } 6257 iwn_nic_unlock(sc); 6258 6259 /* Enable DMA channels. */ 6260 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 6261 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 6262 IWN_FH_TX_CONFIG_DMA_ENA | 6263 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 6264 } 6265 6266 /* Clear "radio off" and "commands blocked" bits. */ 6267 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6268 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 6269 6270 /* Clear pending interrupts. */ 6271 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6272 /* Enable interrupt coalescing. */ 6273 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 6274 /* Enable interrupts. */ 6275 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6276 6277 /* _Really_ make sure "radio off" bit is cleared! */ 6278 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6279 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6280 6281 error = hal->load_firmware(sc); 6282 if (error != 0) { 6283 device_printf(sc->sc_dev, 6284 "%s: could not load firmware, error %d\n", 6285 __func__, error); 6286 return error; 6287 } 6288 /* Wait at most one second for firmware alive notification. */ 6289 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 6290 if (error != 0) { 6291 device_printf(sc->sc_dev, 6292 "%s: timeout waiting for adapter to initialize, error %d\n", 6293 __func__, error); 6294 return error; 6295 } 6296 /* Do post-firmware initialization. */ 6297 return hal->post_alive(sc); 6298 } 6299 6300 static void 6301 iwn_hw_stop(struct iwn_softc *sc) 6302 { 6303 const struct iwn_hal *hal = sc->sc_hal; 6304 uint32_t tmp; 6305 int chnl, qid, ntries; 6306 6307 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 6308 6309 /* Disable interrupts. */ 6310 IWN_WRITE(sc, IWN_INT_MASK, 0); 6311 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6312 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 6313 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6314 6315 /* Make sure we no longer hold the NIC lock. */ 6316 iwn_nic_unlock(sc); 6317 6318 /* Stop TX scheduler. */ 6319 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 6320 6321 /* Stop all DMA channels. 
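 * Each channel gets up to 200 polls of 10us (about 2ms) to reach the
 * idle state; a timeout here is not treated as fatal since the adapter
 * is about to be powered off anyway.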
*/ 6322 if (iwn_nic_lock(sc) == 0) { 6323 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 6324 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 6325 for (ntries = 0; ntries < 200; ntries++) { 6326 tmp = IWN_READ(sc, IWN_FH_TX_STATUS); 6327 if ((tmp & IWN_FH_TX_STATUS_IDLE(chnl)) == 6328 IWN_FH_TX_STATUS_IDLE(chnl)) 6329 break; 6330 DELAY(10); 6331 } 6332 } 6333 iwn_nic_unlock(sc); 6334 } 6335 6336 /* Stop RX ring. */ 6337 iwn_reset_rx_ring(sc, &sc->rxq); 6338 6339 /* Reset all TX rings. */ 6340 for (qid = 0; qid < hal->ntxqs; qid++) 6341 iwn_reset_tx_ring(sc, &sc->txq[qid]); 6342 6343 if (iwn_nic_lock(sc) == 0) { 6344 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 6345 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6346 iwn_nic_unlock(sc); 6347 } 6348 DELAY(5); 6349 6350 /* Power OFF adapter. */ 6351 iwn_apm_stop(sc); 6352 } 6353 6354 static void 6355 iwn_init_locked(struct iwn_softc *sc) 6356 { 6357 struct ifnet *ifp = sc->sc_ifp; 6358 int error; 6359 6360 IWN_LOCK_ASSERT(sc); 6361 6362 error = iwn_hw_prepare(sc); 6363 if (error != 0) { 6364 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 6365 __func__, error); 6366 goto fail; 6367 } 6368 6369 /* Initialize interrupt mask to default value. */ 6370 sc->int_mask = IWN_INT_MASK_DEF; 6371 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6372 6373 /* Check that the radio is not disabled by hardware switch. */ 6374 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 6375 device_printf(sc->sc_dev, 6376 "radio is disabled by hardware switch\n"); 6377 6378 /* Enable interrupts to get RF toggle notifications. */ 6379 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6380 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6381 return; 6382 } 6383 6384 /* Read firmware images from the filesystem. */ 6385 error = iwn_read_firmware(sc); 6386 if (error != 0) { 6387 device_printf(sc->sc_dev, 6388 "%s: could not read firmware, error %d\n", 6389 __func__, error); 6390 goto fail; 6391 } 6392 6393 /* Initialize hardware and upload firmware. */ 6394 error = iwn_hw_init(sc); 6395 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 6396 sc->fw_fp = NULL; 6397 if (error != 0) { 6398 device_printf(sc->sc_dev, 6399 "%s: could not initialize hardware, error %d\n", 6400 __func__, error); 6401 goto fail; 6402 } 6403 6404 /* Configure adapter now that it is ready. */ 6405 error = iwn_config(sc); 6406 if (error != 0) { 6407 device_printf(sc->sc_dev, 6408 "%s: could not configure device, error %d\n", 6409 __func__, error); 6410 goto fail; 6411 } 6412 6413 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 6414 ifp->if_drv_flags |= IFF_DRV_RUNNING; 6415 6416 return; 6417 6418 fail: 6419 iwn_stop_locked(sc); 6420 } 6421 6422 static void 6423 iwn_init(void *arg) 6424 { 6425 struct iwn_softc *sc = arg; 6426 struct ifnet *ifp = sc->sc_ifp; 6427 struct ieee80211com *ic = ifp->if_l2com; 6428 6429 IWN_LOCK(sc); 6430 iwn_init_locked(sc); 6431 IWN_UNLOCK(sc); 6432 6433 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 6434 ieee80211_start_all(ic); 6435 } 6436 6437 static void 6438 iwn_stop_locked(struct iwn_softc *sc) 6439 { 6440 struct ifnet *ifp = sc->sc_ifp; 6441 6442 IWN_LOCK_ASSERT(sc); 6443 6444 sc->sc_tx_timer = 0; 6445 callout_stop(&sc->sc_timer_to); 6446 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 6447 6448 /* Power OFF hardware. */ 6449 iwn_hw_stop(sc); 6450 } 6451 6452 static void 6453 iwn_stop(struct iwn_softc *sc) 6454 { 6455 IWN_LOCK(sc); 6456 iwn_stop_locked(sc); 6457 IWN_UNLOCK(sc); 6458 } 6459 6460 /* 6461 * Callback from net80211 to start a scan.
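 * No scan command is sent from here; the actual SCAN command is issued
 * from iwn_scan_curchan() via iwn_scan().  This hook only starts the
 * link-LED blink pattern for the duration of the scan.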
6462 */ 6463 static void 6464 iwn_scan_start(struct ieee80211com *ic) 6465 { 6466 struct ifnet *ifp = ic->ic_ifp; 6467 struct iwn_softc *sc = ifp->if_softc; 6468 6469 IWN_LOCK(sc); 6470 /* make the link LED blink while we're scanning */ 6471 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 6472 IWN_UNLOCK(sc); 6473 } 6474 6475 /* 6476 * Callback from net80211 to terminate a scan. 6477 */ 6478 static void 6479 iwn_scan_end(struct ieee80211com *ic) 6480 { 6481 struct ifnet *ifp = ic->ic_ifp; 6482 struct iwn_softc *sc = ifp->if_softc; 6483 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6484 6485 IWN_LOCK(sc); 6486 if (vap->iv_state == IEEE80211_S_RUN) { 6487 /* Set link LED to ON status if we are associated */ 6488 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 6489 } 6490 IWN_UNLOCK(sc); 6491 } 6492 6493 /* 6494 * Callback from net80211 to force a channel change. 6495 */ 6496 static void 6497 iwn_set_channel(struct ieee80211com *ic) 6498 { 6499 const struct ieee80211_channel *c = ic->ic_curchan; 6500 struct ifnet *ifp = ic->ic_ifp; 6501 struct iwn_softc *sc = ifp->if_softc; 6502 6503 IWN_LOCK(sc); 6504 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 6505 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 6506 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 6507 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 6508 IWN_UNLOCK(sc); 6509 } 6510 6511 /* 6512 * Callback from net80211 to start scanning of the current channel. 6513 */ 6514 static void 6515 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 6516 { 6517 struct ieee80211vap *vap = ss->ss_vap; 6518 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 6519 int error; 6520 6521 IWN_LOCK(sc); 6522 error = iwn_scan(sc); 6523 IWN_UNLOCK(sc); 6524 if (error != 0) 6525 ieee80211_cancel_scan(vap); 6526 } 6527 6528 /* 6529 * Callback from net80211 to handle the minimum dwell time being met. 6530 * The intent is to terminate the scan but we just let the firmware 6531 * notify us when it's finished as we have no safe way to abort it. 6532 */ 6533 static void 6534 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 6535 { 6536 /* NB: don't try to abort scan; wait for firmware to finish */ 6537 } 6538 6539 static struct iwn_eeprom_chan * 6540 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) 6541 { 6542 int i, j; 6543 6544 for (j = 0; j < 7; j++) { 6545 for (i = 0; i < iwn_bands[j].nchan; i++) { 6546 if (iwn_bands[j].chan[i] == c->ic_ieee) 6547 return &sc->eeprom_channels[j][i]; 6548 } 6549 } 6550 6551 return NULL; 6552 } 6553 6554 /* 6555 * Enforce flags read from EEPROM. 
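 * Each channel handed in by net80211 is looked up in the EEPROM tables
 * through iwn_find_eeprom_channel(); channels that are not present are
 * rejected with EINVAL, and the EEPROM-derived flags are OR'ed into
 * ic_flags for the ones that are.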
6556 */ 6557 static int 6558 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 6559 int nchan, struct ieee80211_channel chans[]) 6560 { 6561 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6562 int i; 6563 6564 for (i = 0; i < nchan; i++) { 6565 struct ieee80211_channel *c = &chans[i]; 6566 struct iwn_eeprom_chan *channel; 6567 6568 channel = iwn_find_eeprom_channel(sc, c); 6569 if (channel == NULL) { 6570 if_printf(ic->ic_ifp, 6571 "%s: invalid channel %u freq %u/0x%x\n", 6572 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 6573 return EINVAL; 6574 } 6575 c->ic_flags |= iwn_eeprom_channel_flags(channel); 6576 } 6577 6578 return 0; 6579 } 6580 6581 static void 6582 iwn_hw_reset(void *arg0, int pending) 6583 { 6584 struct iwn_softc *sc = arg0; 6585 struct ifnet *ifp = sc->sc_ifp; 6586 struct ieee80211com *ic = ifp->if_l2com; 6587 6588 iwn_stop(sc); 6589 iwn_init(sc); 6590 ieee80211_notify_radio(ic, 1); 6591 } 6592 6593 static void 6594 iwn_radio_on(void *arg0, int pending) 6595 { 6596 struct iwn_softc *sc = arg0; 6597 struct ifnet *ifp = sc->sc_ifp; 6598 struct ieee80211com *ic = ifp->if_l2com; 6599 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6600 6601 if (vap != NULL) { 6602 iwn_init(sc); 6603 ieee80211_init(vap); 6604 } 6605 } 6606 6607 static void 6608 iwn_radio_off(void *arg0, int pending) 6609 { 6610 struct iwn_softc *sc = arg0; 6611 struct ifnet *ifp = sc->sc_ifp; 6612 struct ieee80211com *ic = ifp->if_l2com; 6613 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6614 6615 iwn_stop(sc); 6616 if (vap != NULL) 6617 ieee80211_stop(vap); 6618 6619 /* Enable interrupts to get RF toggle notification. */ 6620 IWN_LOCK(sc); 6621 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6622 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6623 IWN_UNLOCK(sc); 6624 } 6625 6626 static void 6627 iwn_sysctlattach(struct iwn_softc *sc) 6628 { 6629 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 6630 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 6631 6632 #ifdef IWN_DEBUG 6633 sc->sc_debug = 0; 6634 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6635 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); 6636 #endif 6637 } 6638 6639 static int 6640 iwn_shutdown(device_t dev) 6641 { 6642 struct iwn_softc *sc = device_get_softc(dev); 6643 6644 iwn_stop(sc); 6645 return 0; 6646 } 6647 6648 static int 6649 iwn_suspend(device_t dev) 6650 { 6651 struct iwn_softc *sc = device_get_softc(dev); 6652 struct ifnet *ifp = sc->sc_ifp; 6653 struct ieee80211com *ic = ifp->if_l2com; 6654 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6655 6656 iwn_stop(sc); 6657 if (vap != NULL) 6658 ieee80211_stop(vap); 6659 return 0; 6660 } 6661 6662 static int 6663 iwn_resume(device_t dev) 6664 { 6665 struct iwn_softc *sc = device_get_softc(dev); 6666 struct ifnet *ifp = sc->sc_ifp; 6667 struct ieee80211com *ic = ifp->if_l2com; 6668 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6669 6670 /* Clear device-specific "PCI retry timeout" register (41h). 
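 * The vendor Linux driver clears the same register, reportedly so that
 * PCI retries do not interfere with CPU power-state transitions; that
 * rationale is an assumption carried over from there, not something
 * documented in this driver.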
*/ 6671 pci_write_config(dev, 0x41, 0, 1); 6672 6673 if (ifp->if_flags & IFF_UP) { 6674 iwn_init(sc); 6675 if (vap != NULL) 6676 ieee80211_init(vap); 6677 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 6678 iwn_start(ifp); 6679 } 6680 return 0; 6681 } 6682 6683 #ifdef IWN_DEBUG 6684 static const char * 6685 iwn_intr_str(uint8_t cmd) 6686 { 6687 switch (cmd) { 6688 /* Notifications */ 6689 case IWN_UC_READY: return "UC_READY"; 6690 case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE"; 6691 case IWN_TX_DONE: return "TX_DONE"; 6692 case IWN_START_SCAN: return "START_SCAN"; 6693 case IWN_STOP_SCAN: return "STOP_SCAN"; 6694 case IWN_RX_STATISTICS: return "RX_STATS"; 6695 case IWN_BEACON_STATISTICS: return "BEACON_STATS"; 6696 case IWN_STATE_CHANGED: return "STATE_CHANGED"; 6697 case IWN_BEACON_MISSED: return "BEACON_MISSED"; 6698 case IWN_RX_PHY: return "RX_PHY"; 6699 case IWN_MPDU_RX_DONE: return "MPDU_RX_DONE"; 6700 case IWN_RX_DONE: return "RX_DONE"; 6701 6702 /* Command Notifications */ 6703 case IWN_CMD_RXON: return "IWN_CMD_RXON"; 6704 case IWN_CMD_RXON_ASSOC: return "IWN_CMD_RXON_ASSOC"; 6705 case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS"; 6706 case IWN_CMD_TIMING: return "IWN_CMD_TIMING"; 6707 case IWN_CMD_LINK_QUALITY: return "IWN_CMD_LINK_QUALITY"; 6708 case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED"; 6709 case IWN5000_CMD_WIMAX_COEX: return "IWN5000_CMD_WIMAX_COEX"; 6710 case IWN5000_CMD_CALIB_CONFIG: return "IWN5000_CMD_CALIB_CONFIG"; 6711 case IWN5000_CMD_CALIB_RESULT: return "IWN5000_CMD_CALIB_RESULT"; 6712 case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE"; 6713 case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE"; 6714 case IWN_CMD_SCAN: return "IWN_CMD_SCAN"; 6715 case IWN_CMD_SCAN_RESULTS: return "IWN_CMD_SCAN_RESULTS"; 6716 case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER"; 6717 case IWN_CMD_TXPOWER_DBM: return "IWN_CMD_TXPOWER_DBM"; 6718 case IWN5000_CMD_TX_ANT_CONFIG: return "IWN5000_CMD_TX_ANT_CONFIG"; 6719 case IWN_CMD_BT_COEX: return "IWN_CMD_BT_COEX"; 6720 case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP"; 6721 case IWN_CMD_SET_SENSITIVITY: return "IWN_CMD_SET_SENSITIVITY"; 6722 case IWN_CMD_PHY_CALIB: return "IWN_CMD_PHY_CALIB"; 6723 } 6724 return "UNKNOWN INTR NOTIF/CMD"; 6725 } 6726 #endif /* IWN_DEBUG */ 6727 6728 static device_method_t iwn_methods[] = { 6729 /* Device interface */ 6730 DEVMETHOD(device_probe, iwn_probe), 6731 DEVMETHOD(device_attach, iwn_attach), 6732 DEVMETHOD(device_detach, iwn_detach), 6733 DEVMETHOD(device_shutdown, iwn_shutdown), 6734 DEVMETHOD(device_suspend, iwn_suspend), 6735 DEVMETHOD(device_resume, iwn_resume), 6736 { 0, 0 } 6737 }; 6738 6739 static driver_t iwn_driver = { 6740 "iwn", 6741 iwn_methods, 6742 sizeof (struct iwn_softc) 6743 }; 6744 static devclass_t iwn_devclass; 6745 6746 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0); 6747 MODULE_DEPEND(iwn, pci, 1, 1, 1); 6748 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 6749 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 6750
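/*
 * Note on firmware images: iwn_read_firmware() fetches the image named in
 * sc->fwname through firmware_get(), so the matching firmware module
 * (e.g. iwn4965fw or iwn5000fw, depending on the adapter; the exact names
 * are an assumption here) must be available in addition to the generic
 * firmware(9) dependency declared above.
 */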