/*-
 * Copyright (c) 2007-2009
 *	Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2008
 *	Benjamin Close <benjsc@FreeBSD.org>
 * Copyright (c) 2008 Sam Leffler, Errno Consulting
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>

static int	iwn_probe(device_t);
static int	iwn_attach(device_t);
static const struct iwn_hal *iwn_hal_attach(struct iwn_softc *);
static void	iwn_radiotap_attach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char name[IFNAMSIZ], int unit, int opmode,
		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_cleanup(device_t);
static int	iwn_detach(device_t);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t, int);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
static int	iwn_read_eeprom(struct iwn_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static void	iwn4965_read_eeprom(struct iwn_softc *);
static void	iwn4965_print_power_group(struct iwn_softc *, int);
static void	iwn5000_read_eeprom(struct iwn_softc *);
static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
static void	iwn_read_eeprom_band(struct iwn_softc *, int);
#if 0	/* HT */
static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
#endif
static void	iwn_read_eeprom_channels(struct iwn_softc *, int,
		    uint32_t);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_timer_timeout(void *);
static void	iwn_calib_reset(struct iwn_softc *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#if 0	/* HT */
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
#endif
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_rftoggle_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static void	iwn_intr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static uint8_t	iwn_plcp_signal(int);
static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *, struct iwn_tx_ring *);
static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static void	iwn_start(struct ifnet *);
static void	iwn_start_locked(struct ifnet *);
static void	iwn_watchdog(struct iwn_softc *sc);
static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *, uint8_t, int);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static int	iwn_wme_update(struct ieee80211com *);
static void	iwn_update_mcast(struct ifnet *);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn5000_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn_config(struct iwn_softc *);
static int	iwn_scan(struct iwn_softc *);
static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
#if 0	/* HT */
static int	iwn_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
#endif
static int	iwn5000_send_calib_results(struct iwn_softc *);
static int	iwn5000_save_calib_result(struct iwn_softc *,
		    struct iwn_phy_calib *, int, int);
static void	iwn5000_free_calib_results(struct iwn_softc *);
static int	iwn5000_chrystal_calib(struct iwn_softc *);
static int	iwn5000_send_calib_query(struct iwn_softc *);
static int	iwn5000_rx_calib_result(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static void	iwn_init_locked(struct iwn_softc *);
static void	iwn_init(void *);
static void	iwn_stop_locked(struct iwn_softc *);
static void	iwn_stop(struct iwn_softc *);
static void	iwn_scan_start(struct ieee80211com *);
static void	iwn_scan_end(struct ieee80211com *);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
		    struct ieee80211_channel *);
static int	iwn_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	iwn_hw_reset(void *, int);
static void	iwn_radio_on(void *, int);
static void	iwn_radio_off(void *, int);
static void	iwn_sysctlattach(struct iwn_softc *);
static int	iwn_shutdown(device_t);
static int	iwn_suspend(device_t);
static int	iwn_resume(device_t);

#define IWN_DEBUG
#ifdef IWN_DEBUG
enum {
	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
	IWN_DEBUG_BEACON	= 0x00000040,	/* beacon handling */
	IWN_DEBUG_WATCHDOG	= 0x00000080,	/* watchdog timeout */
	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
	IWN_DEBUG_LED		= 0x00000800,	/* led management */
	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	IWN_DEBUG_ANY		= 0xffffffff
};

#define DPRINTF(sc, m, fmt, ...) do {			\
	if (sc->sc_debug & (m))				\
		printf(fmt, __VA_ARGS__);		\
} while (0)

static const char *iwn_intr_str(uint8_t);
#else
#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
#endif
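
/*
 * Note: sc_debug is a bitmask of the IWN_DEBUG_* flags above; it is
 * expected to be set at run time through the sysctl node created in
 * iwn_sysctlattach().  When IWN_DEBUG is not defined, DPRINTF() compiles
 * away entirely.
 */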

struct iwn_ident {
	uint16_t	vendor;
	uint16_t	device;
	const char	*name;
};

static const struct iwn_ident iwn_ident_table[] = {
	{ 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" },
	{ 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" },
	{ 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" },
	{ 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" },
	{ 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" },
	{ 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" },
	{ 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" },
	{ 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" },
	{ 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" },
	{ 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" },
	{ 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" },
	{ 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" },
	{ 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" },
	{ 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" },
	{ 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" },
	{ 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" },
	{ 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" },
	{ 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" },
	{ 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" },
	{ 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" },
	{ 0x8086, 0x0087, "Intel(R) PRO/Wireless 6250" },
	{ 0x8086, 0x0089, "Intel(R) PRO/Wireless 6250" },
	{ 0x8086, 0x0082, "Intel(R) PRO/Wireless 6205a" },
	{ 0x8086, 0x0085, "Intel(R) PRO/Wireless 6205a" },
#ifdef notyet
	{ 0x8086, 0x008a, "Intel(R) PRO/Wireless 6205b" },
	{ 0x8086, 0x008b, "Intel(R) PRO/Wireless 6205b" },
	{ 0x8086, 0x008f, "Intel(R) PRO/Wireless 6205b" },
	{ 0x8086, 0x0090, "Intel(R) PRO/Wireless 6205b" },
	{ 0x8086, 0x0091, "Intel(R) PRO/Wireless 6205b" },
#endif
	{ 0, 0, NULL }
};

static const struct iwn_hal iwn4965_hal = {
	iwn4965_load_firmware,
	iwn4965_read_eeprom,
	iwn4965_post_alive,
	iwn4965_nic_config,
	iwn4965_update_sched,
	iwn4965_get_temperature,
	iwn4965_get_rssi,
	iwn4965_set_txpower,
	iwn4965_init_gains,
	iwn4965_set_gains,
	iwn4965_add_node,
	iwn4965_tx_done,
#if 0	/* HT */
	iwn4965_ampdu_tx_start,
	iwn4965_ampdu_tx_stop,
#endif
	IWN4965_NTXQUEUES,
	IWN4965_NDMACHNLS,
	IWN4965_ID_BROADCAST,
	IWN4965_RXONSZ,
	IWN4965_SCHEDSZ,
	IWN4965_FW_TEXT_MAXSZ,
	IWN4965_FW_DATA_MAXSZ,
	IWN4965_FWSZ,
	IWN4965_SCHED_TXFACT
};

static const struct iwn_hal iwn5000_hal = {
	iwn5000_load_firmware,
	iwn5000_read_eeprom,
	iwn5000_post_alive,
	iwn5000_nic_config,
	iwn5000_update_sched,
	iwn5000_get_temperature,
	iwn5000_get_rssi,
	iwn5000_set_txpower,
	iwn5000_init_gains,
	iwn5000_set_gains,
	iwn5000_add_node,
	iwn5000_tx_done,
#if 0	/* HT */
	iwn5000_ampdu_tx_start,
	iwn5000_ampdu_tx_stop,
#endif
	IWN5000_NTXQUEUES,
	IWN5000_NDMACHNLS,
	IWN5000_ID_BROADCAST,
	IWN5000_RXONSZ,
	IWN5000_SCHEDSZ,
	IWN5000_FW_TEXT_MAXSZ,
	IWN5000_FW_DATA_MAXSZ,
	IWN5000_FWSZ,
	IWN5000_SCHED_TXFACT
};

static int
iwn_probe(device_t dev)
{
	const struct iwn_ident *ident;

	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
		if (pci_get_vendor(dev) == ident->vendor &&
		    pci_get_device(dev) == ident->device) {
			device_set_desc(dev, ident->name);
			return 0;
		}
	}
	return ENXIO;
}

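/*
 * Attach the adapter: allocate bus resources, select the HAL matching the
 * hardware revision, allocate the DMA areas (firmware, "Keep Warm" page,
 * ICT table, TX scheduler, TX/RX rings), read the EEPROM/OTPROM and finally
 * attach to net80211.  Everything allocated here is released through
 * iwn_cleanup() on failure.
 */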
static int
iwn_attach(device_t dev)
{
	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
	struct ieee80211com *ic;
	struct ifnet *ifp;
	const struct iwn_hal *hal;
	uint32_t tmp;
	int i, error, result;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	sc->sc_dev = dev;

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return error;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	/* Hardware bug workaround. */
	tmp = pci_read_config(dev, PCIR_COMMAND, 1);
	if (tmp & PCIM_CMD_INTxDIS) {
		DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
		    __func__);
		tmp &= ~PCIM_CMD_INTxDIS;
		pci_write_config(dev, PCIR_COMMAND, tmp, 1);
	}

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	sc->mem_rid = PCIR_BAR(0);
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resources\n");
		error = ENOMEM;
		return error;
	}

	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);
	sc->irq_rid = 0;
	if ((result = pci_msi_count(dev)) == 1 &&
	    pci_alloc_msi(dev, &result) == 0)
		sc->irq_rid = 1;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->irq == NULL) {
		device_printf(dev, "could not allocate interrupt resource\n");
		error = ENOMEM;
		goto fail;
	}

	IWN_LOCK_INIT(sc);
	callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);

	/* Attach Hardware Abstraction Layer. */
	hal = iwn_hal_attach(sc);
	if (hal == NULL) {
		error = ENXIO;	/* XXX: Wrong error code? */
		goto fail;
	}

	error = iwn_hw_prepare(sc);
	if (error != 0) {
		device_printf(dev, "hardware not ready, error %d\n", error);
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	error = iwn_alloc_fwmem(sc);
	if (error != 0) {
		device_printf(dev,
		    "could not allocate memory for firmware, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	error = iwn_alloc_kw(sc);
	if (error != 0) {
		device_printf(dev,
		    "could not allocate \"Keep Warm\" page, error %d\n", error);
		goto fail;
	}

	/* Allocate ICT table for 5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    (error = iwn_alloc_ict(sc)) != 0) {
		device_printf(dev,
		    "%s: could not allocate ICT table, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	error = iwn_alloc_sched(sc);
	if (error != 0) {
		device_printf(dev,
		    "could not allocate TX scheduler rings, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate TX rings (16 on 4965AGN, 20 on 5000). */
	for (i = 0; i < hal->ntxqs; i++) {
		error = iwn_alloc_tx_ring(sc, &sc->txq[i], i);
		if (error != 0) {
			device_printf(dev,
			    "could not allocate Tx ring %d, error %d\n",
			    i, error);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	error = iwn_alloc_rx_ring(sc, &sc->rxq);
	if (error != 0) {
		device_printf(dev,
		    "could not allocate Rx ring, error %d\n", error);
		goto fail;
	}

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	/* Count the number of available chains. */
	sc->ntxchains =
	    ((sc->txchainmask >> 2) & 1) +
	    ((sc->txchainmask >> 1) & 1) +
	    ((sc->txchainmask >> 0) & 1);
	sc->nrxchains =
	    ((sc->rxchainmask >> 2) & 1) +
	    ((sc->rxchainmask >> 1) & 1) +
	    ((sc->rxchainmask >> 0) & 1);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOMEM;
		goto fail;
	}
	ic = ifp->if_l2com;

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_MONITOR		/* monitor mode supported */
		| IEEE80211_C_TXPMGT		/* tx power management */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_BGSCAN		/* background scanning */
#if 0
		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
#endif
		| IEEE80211_C_WME		/* WME */
		| IEEE80211_C_RATECTL		/* use ratectl */
		;
#if 0	/* HT */
	/* XXX disable until HT channel setup works */
	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		;

	/* Set HT capabilities. */
	ic->ic_htcaps =
#if IWN_RBUF_SIZE == 8192
	    IEEE80211_HTCAP_AMSDU7935 |
#endif
	    IEEE80211_HTCAP_CBW20_40 |
	    IEEE80211_HTCAP_SGI20 |
	    IEEE80211_HTCAP_SGI40;
	if (sc->hw_type != IWN_HW_REV_TYPE_4965)
		ic->ic_htcaps |= IEEE80211_HTCAP_GF;
	if (sc->hw_type == IWN_HW_REV_TYPE_6050)
		ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
	else
		ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
#endif

	/* Read MAC address, channels, etc from EEPROM. */
	error = iwn_read_eeprom(sc, macaddr);
	if (error != 0) {
		device_printf(dev, "could not read EEPROM, error %d\n",
		    error);
		goto fail;
	}

	device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n",
	    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
	    macaddr, ":");

#if 0	/* HT */
	/* Set supported HT rates. */
	ic->ic_sup_mcs[0] = 0xff;
	if (sc->nrxchains > 1)
		ic->ic_sup_mcs[1] = 0xff;
	if (sc->nrxchains > 2)
		ic->ic_sup_mcs[2] = 0xff;
#endif

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwn_init;
	ifp->if_ioctl = iwn_ioctl;
	ifp->if_start = iwn_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ieee80211_ifattach(ic, macaddr);
	ic->ic_vap_create = iwn_vap_create;
	ic->ic_vap_delete = iwn_vap_delete;
	ic->ic_raw_xmit = iwn_raw_xmit;
	ic->ic_node_alloc = iwn_node_alloc;
	ic->ic_newassoc = iwn_newassoc;
	ic->ic_wme.wme_update = iwn_wme_update;
	ic->ic_update_mcast = iwn_update_mcast;
	ic->ic_scan_start = iwn_scan_start;
	ic->ic_scan_end = iwn_scan_end;
	ic->ic_set_channel = iwn_set_channel;
	ic->ic_scan_curchan = iwn_scan_curchan;
	ic->ic_scan_mindwell = iwn_scan_mindwell;
	ic->ic_setregdomain = iwn_setregdomain;
#if 0	/* HT */
	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
#endif

	iwn_radiotap_attach(sc);
	iwn_sysctlattach(sc);

	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwn_intr, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt, error %d\n",
		    error);
		goto fail;
	}

	ieee80211_announce(ic);
	return 0;
fail:
	iwn_cleanup(dev);
	return error;
}

static const struct iwn_hal *
iwn_hal_attach(struct iwn_softc *sc)
{
	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;

	switch (sc->hw_type) {
	case IWN_HW_REV_TYPE_4965:
		sc->sc_hal = &iwn4965_hal;
		sc->limits = &iwn4965_sensitivity_limits;
		sc->fwname = "iwn4965fw";
		sc->txchainmask = IWN_ANT_AB;
		sc->rxchainmask = IWN_ANT_ABC;
		break;
	case IWN_HW_REV_TYPE_5100:
		sc->sc_hal = &iwn5000_hal;
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwn5000fw";
		sc->txchainmask = IWN_ANT_B;
		sc->rxchainmask = IWN_ANT_AB;
		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
		    IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
		    IWN_CALIB_BASE_BAND;
		break;
	case IWN_HW_REV_TYPE_5150:
		sc->sc_hal = &iwn5000_hal;
		sc->limits = &iwn5150_sensitivity_limits;
		sc->fwname = "iwn5150fw";
		sc->txchainmask = IWN_ANT_A;
		sc->rxchainmask = IWN_ANT_AB;
		sc->calib_init = IWN_CALIB_DC | IWN_CALIB_LO |
		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
		break;
	case IWN_HW_REV_TYPE_5300:
	case IWN_HW_REV_TYPE_5350:
		sc->sc_hal = &iwn5000_hal;
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwn5000fw";
		sc->txchainmask = IWN_ANT_ABC;
		sc->rxchainmask = IWN_ANT_ABC;
		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
		    IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
		    IWN_CALIB_BASE_BAND;
		break;
	case IWN_HW_REV_TYPE_1000:
		sc->sc_hal = &iwn5000_hal;
		sc->limits = &iwn1000_sensitivity_limits;
		sc->fwname = "iwn1000fw";
		sc->txchainmask = IWN_ANT_A;
		sc->rxchainmask = IWN_ANT_AB;
		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
		    IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
		    IWN_CALIB_BASE_BAND;
		break;
	case IWN_HW_REV_TYPE_6000:
		sc->sc_hal = &iwn5000_hal;
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwn6000fw";
		switch (pci_get_device(sc->sc_dev)) {
		case 0x422C:
		case 0x4239:
			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
			sc->txchainmask = IWN_ANT_BC;
			sc->rxchainmask = IWN_ANT_BC;
			break;
		default:
			sc->txchainmask = IWN_ANT_ABC;
			sc->rxchainmask = IWN_ANT_ABC;
			break;
		}
		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
		break;
	case IWN_HW_REV_TYPE_6050:
		sc->sc_hal = &iwn5000_hal;
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwn6050fw";
		sc->txchainmask = IWN_ANT_AB;
		sc->rxchainmask = IWN_ANT_AB;
		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_DC | IWN_CALIB_LO |
		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
		break;
	case IWN_HW_REV_TYPE_6005:
		sc->sc_hal = &iwn5000_hal;
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwn6005fw";
		sc->txchainmask = IWN_ANT_AB;
		sc->rxchainmask = IWN_ANT_AB;
		sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
		    IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
		break;
	default:
		device_printf(sc->sc_dev, "adapter type %d not supported\n",
		    sc->hw_type);
		return NULL;
	}
	return sc->sc_hal;
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWN_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWN_RX_RADIOTAP_PRESENT);
}

static struct ieee80211vap *
iwn_vap_create(struct ieee80211com *ic,
    const char name[IFNAMSIZ], int unit, int opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwn_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;
	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (ivp == NULL)
		return NULL;
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwn_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
	ic->ic_opmode = opmode;
	return vap;
}

static void
iwn_vap_delete(struct ieee80211vap *vap)
{
	struct iwn_vap *ivp = IWN_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

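/*
 * Undo everything done in iwn_attach(); this is also the error path used
 * there, so every resource must be checked for NULL before being released.
 */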
static int
iwn_cleanup(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic;
	int i;

	if (ifp != NULL) {
		ic = ifp->if_l2com;

		ieee80211_draintask(ic, &sc->sc_reinit_task);
		ieee80211_draintask(ic, &sc->sc_radioon_task);
		ieee80211_draintask(ic, &sc->sc_radiooff_task);

		iwn_stop(sc);
		callout_drain(&sc->sc_timer_to);
		ieee80211_ifdetach(ic);
	}

	iwn5000_free_calib_results(sc);

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_hal != NULL)
		for (i = 0; i < sc->sc_hal->ntxqs; i++)
			iwn_free_tx_ring(sc, &sc->txq[i]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	if (sc->irq != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
		if (sc->irq_rid == 1)
			pci_release_msi(dev);
	}

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	if (ifp != NULL)
		if_free(ifp);

	IWN_LOCK_DESTROY(sc);
	return 0;
}

static int
iwn_detach(device_t dev)
{
	iwn_cleanup(dev);
	return 0;
}

static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

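/*
 * The "prph" (periphery) and "mem" accessors below use indirect access
 * windows: the target address is written to an address register and the
 * value is then read from/written to the corresponding data register.
 * Callers are generally expected to hold the NIC lock (iwn_nic_lock())
 * around sequences of these accesses.
 */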
static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}

static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	/* Wait for clock stabilization before accessing prph. */
	error = iwn_clock_wait(sc);
	if (error != 0)
		return error;

	error = iwn_nic_lock(sc);
	if (error != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = prev = 0;
		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}
	return 0;
}

static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint32_t val, tmp;
	int ntries;
	uint8_t *out = data;

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			device_printf(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				device_printf(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit. */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}
	return 0;
}

static void
iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

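/*
 * Allocate a single physically contiguous DMA area: create a tag describing
 * the size/alignment constraints, allocate the memory, then load the map so
 * that iwn_dma_map_addr() records the single segment's bus address in
 * dma->paddr.  iwn_dma_contig_free() releases whatever subset of this was
 * successfully set up.
 */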
static int
iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment, int flags)
{
	int error;

	dma->size = size;
	dma->tag = NULL;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, flags, NULL, NULL, &dma->tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bus_dma_tag_create failed, error %d\n",
		    __func__, error);
		goto fail;
	}
	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    flags | BUS_DMA_ZERO, &dma->map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bus_dmamem_alloc failed, error %d\n", __func__, error);
		goto fail;
	}
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
	    size, iwn_dma_map_addr, &dma->paddr, flags);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
		goto fail;
	}

	if (kvap != NULL)
		*kvap = dma->vaddr;
	return 0;
fail:
	iwn_dma_contig_free(dma);
	return error;
}

static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->tag != NULL) {
		if (dma->map != NULL) {
			/* Only unload the map if it was actually loaded. */
			if (dma->paddr != 0) {
				bus_dmamap_sync(dma->tag, dma->map,
				    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(dma->tag, dma->map);
			}
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		}
		bus_dma_tag_destroy(dma->tag);
	}
}

static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->sched_dma,
	    (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT);
}

static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096,
	    BUS_DMA_NOWAIT);
}

static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->ict_dma,
	    (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT);
}

static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL,
	    sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT);
}

static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

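/*
 * The RX ring consists of IWN_RX_RING_COUNT 32-bit descriptors, each holding
 * the bus address of a receive buffer shifted right by 8 bits (buffers are
 * therefore 256-byte aligned), plus a small status area updated by the
 * firmware.
 */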
static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
	    (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate Rx ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1,
	    MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bus_dma_tag_create failed, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
	    (void **)&ring->stat, sizeof (struct iwn_rx_status),
	    16, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate Rx status DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];
		bus_addr_t paddr;

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: bus_dmamap_create failed, error %d\n",
			    __func__, error);
			goto fail;
		}

		data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (data->m == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not allocate rx mbuf\n", __func__);
			error = ENOMEM;
			goto fail;
		}

		/* Map page. */
		error = bus_dmamap_load(ring->data_dmat, data->map,
		    mtod(data->m, caddr_t), MJUMPAGESIZE,
		    iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: bus_dmamap_load failed, error %d\n",
			    __func__, error);
			m_freem(data->m);
			error = ENOMEM;	/* XXX unique code */
			goto fail;
		}
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREWRITE);

		/* Set physical address of RX buffer (256-byte aligned). */
		ring->desc[i] = htole32(paddr >> 8);
	}
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	return 0;
fail:
	iwn_free_rx_ring(sc, ring);
	return error;
}

static void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	if (iwn_nic_lock(sc) == 0) {
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
#ifdef IWN_DEBUG
		if (ntries == 1000)
			DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
			    "timeout resetting Rx ring");
#endif
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

static void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
}

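/*
 * Each TX ring has IWN_TX_RING_COUNT descriptors and, for the first five
 * rings (4 EDCA queues + command queue), a parallel array of TX commands.
 * The per-entry cmd_paddr/scratch_paddr precompute bus addresses into that
 * command array (scratch_paddr is cmd_paddr + 12, presumably the command's
 * scratch area).
 */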
static int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_size_t size;
	bus_addr_t paddr;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned.) */
	size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
	    (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > 4)
		return 0;

	size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma,
	    (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX cmd DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1,
	    MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bus_dma_tag_create failed, error %d\n",
		    __func__, error);
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: bus_dmamap_create failed, error %d\n",
			    __func__, error);
			goto fail;
		}
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREWRITE);
	}
	return 0;
fail:
	iwn_free_tx_ring(sc, ring);
	return error;
}

static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
}

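/*
 * On 5000 Series and later, interrupt causes are delivered through the
 * Interrupt Cause Table (ICT), a 4KB table in host DRAM that the device
 * updates instead of the legacy IWN_INT register.  Reset the table and
 * switch the driver into ICT mode.
 */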
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned.) */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	const struct iwn_hal *hal = sc->sc_hal;
	int error;
	uint16_t val;

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work. */
	error = iwn_apm_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n",
		    __func__, error);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		return EIO;
	}
	error = iwn_eeprom_lock(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}

	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		error = iwn_init_otprom(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	hal->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);
	return 0;
}

static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	int i;
	uint16_t val;

	/* Read regulatory domain (4 ASCII characters.) */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only.) */
	for (i = 0; i < 5; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples. */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif
}

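/*
 * The factory calibration samples read above are later interpolated by
 * iwn4965_set_txpower() to derive per-rate TX power settings for the
 * current channel and temperature.
 */
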
#ifdef IWN_DEBUG
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	printf("===band %d===\n", i);
	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	printf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	printf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t temp, volt;
	uint32_t addr, base;
	int i;
	uint16_t val;

	/* Read regulatory domain (4 ASCII characters.) */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only.) */
	for (i = 0; i < 5; i++) {
		addr = base + iwn5000_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n",
	    __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    temp, volt, sc->temp_off);
	}
}

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return nflags;
}

static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c;
	int i, chan, nflags;

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n",
		    chan, channels[i].flags, channels[i].maxpwr);

		c = &ic->ic_channels[ic->ic_nchans++];
		c->ic_ieee = chan;
		c->ic_maxregpower = channels[i].maxpwr;
		c->ic_maxpower = 2*c->ic_maxregpower;

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		if (n == 0) {	/* 2GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan,
			    IEEE80211_CHAN_G);

			/* G =>'s B is supported */
			c->ic_flags = IEEE80211_CHAN_B | nflags;

			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags = IEEE80211_CHAN_G | nflags;
		} else {	/* 5GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan,
			    IEEE80211_CHAN_A);
			c->ic_flags = IEEE80211_CHAN_A | nflags;
			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
		}
#if 0	/* HT */
		/* XXX no constraints on using HT20 */
		/* add HT20, HT40 added separately */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = c[-1];
		c->ic_flags |= IEEE80211_CHAN_HT20;
		/* XXX NARROW =>'s 1/2 and 1/4 width? */
#endif
	}
}

#if 0	/* HT */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c, *cent, *extc;
	int i;

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) ||
		    !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		/*
		 * Each entry defines an HT40 channel pair; find the
		 * center channel, then the extension channel above.
		 */
1831 */ 1832 cent = ieee80211_find_channel_byieee(ic, band->chan[i], 1833 band->flags & ~IEEE80211_CHAN_HT); 1834 if (cent == NULL) { /* XXX shouldn't happen */ 1835 device_printf(sc->sc_dev, 1836 "%s: no entry for channel %d\n", 1837 __func__, band->chan[i]); 1838 continue; 1839 } 1840 extc = ieee80211_find_channel(ic, cent->ic_freq+20, 1841 band->flags & ~IEEE80211_CHAN_HT); 1842 if (extc == NULL) { 1843 DPRINTF(sc, IWN_DEBUG_RESET, 1844 "skip chan %d, extension channel not found\n", 1845 band->chan[i]); 1846 continue; 1847 } 1848 1849 DPRINTF(sc, IWN_DEBUG_RESET, 1850 "add ht40 chan %d flags 0x%x maxpwr %d\n", 1851 band->chan[i], channels[i].flags, channels[i].maxpwr); 1852 1853 c = &ic->ic_channels[ic->ic_nchans++]; 1854 c[0] = cent[0]; 1855 c->ic_extieee = extc->ic_ieee; 1856 c->ic_flags &= ~IEEE80211_CHAN_HT; 1857 c->ic_flags |= IEEE80211_CHAN_HT40U; 1858 c = &ic->ic_channels[ic->ic_nchans++]; 1859 c[0] = extc[0]; 1860 c->ic_extieee = cent->ic_ieee; 1861 c->ic_flags &= ~IEEE80211_CHAN_HT; 1862 c->ic_flags |= IEEE80211_CHAN_HT40D; 1863 } 1864 } 1865 #endif 1866 1867 static void 1868 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 1869 { 1870 struct ifnet *ifp = sc->sc_ifp; 1871 struct ieee80211com *ic = ifp->if_l2com; 1872 1873 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], 1874 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); 1875 1876 if (n < 5) 1877 iwn_read_eeprom_band(sc, n); 1878 #if 0 /* HT */ 1879 else 1880 iwn_read_eeprom_ht40(sc, n); 1881 #endif 1882 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 1883 } 1884 1885 #define nitems(_a) (sizeof((_a)) / sizeof((_a)[0])) 1886 1887 static void 1888 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 1889 { 1890 struct iwn_eeprom_enhinfo enhinfo[35]; 1891 uint16_t val, base; 1892 int8_t maxpwr; 1893 int i; 1894 1895 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1896 base = le16toh(val); 1897 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 1898 enhinfo, sizeof enhinfo); 1899 1900 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr); 1901 for (i = 0; i < nitems(enhinfo); i++) { 1902 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0) 1903 continue; /* Skip invalid entries. */ 1904 1905 maxpwr = 0; 1906 if (sc->txchainmask & IWN_ANT_A) 1907 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 1908 if (sc->txchainmask & IWN_ANT_B) 1909 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 1910 if (sc->txchainmask & IWN_ANT_C) 1911 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 1912 if (sc->ntxchains == 2) 1913 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 1914 else if (sc->ntxchains == 3) 1915 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 1916 maxpwr /= 2; /* Convert half-dBm to dBm. */ 1917 1918 DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i, 1919 maxpwr); 1920 sc->enh_maxpwr[i] = maxpwr; 1921 } 1922 } 1923 1924 static struct ieee80211_node * 1925 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 1926 { 1927 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 1928 } 1929 1930 static void 1931 iwn_newassoc(struct ieee80211_node *ni, int isnew) 1932 { 1933 /* XXX move */ 1934 ieee80211_ratectl_node_init(ni); 1935 } 1936 1937 static int 1938 iwn_media_change(struct ifnet *ifp) 1939 { 1940 int error = ieee80211_media_change(ifp); 1941 /* NB: only the fixed rate can change and that doesn't need a reset */ 1942 return (error == ENETRESET ? 
0 : error); 1943 } 1944 1945 static int 1946 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 1947 { 1948 struct iwn_vap *ivp = IWN_VAP(vap); 1949 struct ieee80211com *ic = vap->iv_ic; 1950 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1951 int error; 1952 1953 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 1954 ieee80211_state_name[vap->iv_state], 1955 ieee80211_state_name[nstate]); 1956 1957 IEEE80211_UNLOCK(ic); 1958 IWN_LOCK(sc); 1959 callout_stop(&sc->sc_timer_to); 1960 1961 switch (nstate) { 1962 case IEEE80211_S_ASSOC: 1963 if (vap->iv_state != IEEE80211_S_RUN) 1964 break; 1965 /* FALLTHROUGH */ 1966 case IEEE80211_S_AUTH: 1967 if (vap->iv_state == IEEE80211_S_AUTH) 1968 break; 1969 1970 /* 1971 * !AUTH -> AUTH transition requires state reset to handle 1972 * reassociations correctly. 1973 */ 1974 sc->rxon.associd = 0; 1975 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1976 iwn_calib_reset(sc); 1977 error = iwn_auth(sc, vap); 1978 break; 1979 1980 case IEEE80211_S_RUN: 1981 /* 1982 * RUN -> RUN transition; Just restart the timers. 1983 */ 1984 if (vap->iv_state == IEEE80211_S_RUN) { 1985 iwn_calib_reset(sc); 1986 break; 1987 } 1988 1989 /* 1990 * !RUN -> RUN requires setting the association id 1991 * which is done with a firmware cmd. We also defer 1992 * starting the timers until that work is done. 1993 */ 1994 error = iwn_run(sc, vap); 1995 break; 1996 1997 default: 1998 break; 1999 } 2000 IWN_UNLOCK(sc); 2001 IEEE80211_LOCK(ic); 2002 return ivp->iv_newstate(vap, nstate, arg); 2003 } 2004 2005 /* 2006 * Process an RX_PHY firmware notification. This is usually immediately 2007 * followed by an MPDU_RX_DONE notification. 2008 */ 2009 static void 2010 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2011 struct iwn_rx_data *data) 2012 { 2013 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2014 2015 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2016 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2017 2018 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2019 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2020 sc->last_rx_valid = 1; 2021 } 2022 2023 static void 2024 iwn_timer_timeout(void *arg) 2025 { 2026 struct iwn_softc *sc = arg; 2027 uint32_t flags = 0; 2028 2029 IWN_LOCK_ASSERT(sc); 2030 2031 if (sc->calib_cnt && --sc->calib_cnt == 0) { 2032 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2033 "send statistics request"); 2034 (void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2035 sizeof flags, 1); 2036 sc->calib_cnt = 60; /* do calibration every 60s */ 2037 } 2038 iwn_watchdog(sc); /* NB: piggyback tx watchdog */ 2039 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 2040 } 2041 2042 static void 2043 iwn_calib_reset(struct iwn_softc *sc) 2044 { 2045 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); 2046 sc->calib_cnt = 60; /* do calibration every 60s */ 2047 } 2048 2049 /* 2050 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2051 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 
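 *
 * In other words, the notification stream for a single received frame
 * normally looks like
 *
 *	IWN_RX_PHY       -> iwn_rx_phy() caches the PHY statistics
 *	IWN_MPDU_RX_DONE -> iwn_rx_done() consumes the cached statistics
 *
 * with sc->last_rx_valid acting as the handshake between the two handlers;
 * an MPDU_RX_DONE without a prior RX_PHY is counted as an input error.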
2052 */ 2053 static void 2054 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2055 struct iwn_rx_data *data) 2056 { 2057 const struct iwn_hal *hal = sc->sc_hal; 2058 struct ifnet *ifp = sc->sc_ifp; 2059 struct ieee80211com *ic = ifp->if_l2com; 2060 struct iwn_rx_ring *ring = &sc->rxq; 2061 struct ieee80211_frame *wh; 2062 struct ieee80211_node *ni; 2063 struct mbuf *m, *m1; 2064 struct iwn_rx_stat *stat; 2065 caddr_t head; 2066 bus_addr_t paddr; 2067 uint32_t flags; 2068 int error, len, rssi, nf; 2069 2070 if (desc->type == IWN_MPDU_RX_DONE) { 2071 /* Check for prior RX_PHY notification. */ 2072 if (!sc->last_rx_valid) { 2073 DPRINTF(sc, IWN_DEBUG_ANY, 2074 "%s: missing RX_PHY\n", __func__); 2075 ifp->if_ierrors++; 2076 return; 2077 } 2078 sc->last_rx_valid = 0; 2079 stat = &sc->last_rx_stat; 2080 } else 2081 stat = (struct iwn_rx_stat *)(desc + 1); 2082 2083 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2084 2085 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2086 device_printf(sc->sc_dev, 2087 "%s: invalid rx statistic header, len %d\n", 2088 __func__, stat->cfg_phy_len); 2089 ifp->if_ierrors++; 2090 return; 2091 } 2092 if (desc->type == IWN_MPDU_RX_DONE) { 2093 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2094 head = (caddr_t)(mpdu + 1); 2095 len = le16toh(mpdu->len); 2096 } else { 2097 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2098 len = le16toh(stat->len); 2099 } 2100 2101 flags = le32toh(*(uint32_t *)(head + len)); 2102 2103 /* Discard frames with a bad FCS early. */ 2104 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2105 DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n", 2106 __func__, flags); 2107 ifp->if_ierrors++; 2108 return; 2109 } 2110 /* Discard frames that are too short. */ 2111 if (len < sizeof (*wh)) { 2112 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2113 __func__, len); 2114 ifp->if_ierrors++; 2115 return; 2116 } 2117 2118 /* XXX don't need mbuf, just dma buffer */ 2119 m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 2120 if (m1 == NULL) { 2121 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2122 __func__); 2123 ifp->if_ierrors++; 2124 return; 2125 } 2126 bus_dmamap_unload(ring->data_dmat, data->map); 2127 2128 error = bus_dmamap_load(ring->data_dmat, data->map, 2129 mtod(m1, caddr_t), MJUMPAGESIZE, 2130 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2131 if (error != 0 && error != EFBIG) { 2132 device_printf(sc->sc_dev, 2133 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2134 m_freem(m1); 2135 ifp->if_ierrors++; 2136 return; 2137 } 2138 2139 m = data->m; 2140 data->m = m1; 2141 /* Update RX descriptor. */ 2142 ring->desc[ring->cur] = htole32(paddr >> 8); 2143 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2144 BUS_DMASYNC_PREWRITE); 2145 2146 /* Finalize mbuf. */ 2147 m->m_pkthdr.rcvif = ifp; 2148 m->m_data = head; 2149 m->m_pkthdr.len = m->m_len = len; 2150 2151 rssi = hal->get_rssi(sc, stat); 2152 2153 /* Grab a reference to the source node. */ 2154 wh = mtod(m, struct ieee80211_frame *); 2155 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2156 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2157 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; 2158 2159 if (ieee80211_radiotap_active(ic)) { 2160 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2161 2162 tap->wr_tsft = htole64(stat->tstamp); 2163 tap->wr_flags = 0; 2164 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2165 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2166 switch (stat->rate) { 2167 /* CCK rates. */ 2168 case 10: tap->wr_rate = 2; break; 2169 case 20: tap->wr_rate = 4; break; 2170 case 55: tap->wr_rate = 11; break; 2171 case 110: tap->wr_rate = 22; break; 2172 /* OFDM rates. */ 2173 case 0xd: tap->wr_rate = 12; break; 2174 case 0xf: tap->wr_rate = 18; break; 2175 case 0x5: tap->wr_rate = 24; break; 2176 case 0x7: tap->wr_rate = 36; break; 2177 case 0x9: tap->wr_rate = 48; break; 2178 case 0xb: tap->wr_rate = 72; break; 2179 case 0x1: tap->wr_rate = 96; break; 2180 case 0x3: tap->wr_rate = 108; break; 2181 /* Unknown rate: should not happen. */ 2182 default: tap->wr_rate = 0; 2183 } 2184 tap->wr_dbm_antsignal = rssi; 2185 tap->wr_dbm_antnoise = nf; 2186 } 2187 2188 IWN_UNLOCK(sc); 2189 2190 /* Send the frame to the 802.11 layer. */ 2191 if (ni != NULL) { 2192 (void) ieee80211_input(ni, m, rssi - nf, nf); 2193 /* Node is no longer needed. */ 2194 ieee80211_free_node(ni); 2195 } else 2196 (void) ieee80211_input_all(ic, m, rssi - nf, nf); 2197 2198 IWN_LOCK(sc); 2199 } 2200 2201 #if 0 /* HT */ 2202 /* Process an incoming Compressed BlockAck. */ 2203 static void 2204 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2205 struct iwn_rx_data *data) 2206 { 2207 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2208 struct iwn_tx_ring *txq; 2209 2210 txq = &sc->txq[letoh16(ba->qid)]; 2211 /* XXX TBD */ 2212 } 2213 #endif 2214 2215 /* 2216 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2217 * The latter is sent by the firmware after each received beacon. 2218 */ 2219 static void 2220 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2221 struct iwn_rx_data *data) 2222 { 2223 const struct iwn_hal *hal = sc->sc_hal; 2224 struct ifnet *ifp = sc->sc_ifp; 2225 struct ieee80211com *ic = ifp->if_l2com; 2226 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2227 struct iwn_calib_state *calib = &sc->calib; 2228 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2229 int temp; 2230 2231 /* Beacon stats are meaningful only when associated and not scanning. */ 2232 if (vap->iv_state != IEEE80211_S_RUN || 2233 (ic->ic_flags & IEEE80211_F_SCAN)) 2234 return; 2235 2236 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2237 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type); 2238 iwn_calib_reset(sc); /* Reset TX power calibration timeout. */ 2239 2240 /* Test if temperature has changed. */ 2241 if (stats->general.temp != sc->rawtemp) { 2242 /* Convert "raw" temperature to degC. */ 2243 sc->rawtemp = stats->general.temp; 2244 temp = hal->get_temperature(sc); 2245 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 2246 __func__, temp); 2247 2248 /* Update TX power if need be (4965AGN only.) */ 2249 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2250 iwn4965_power_calibration(sc, temp); 2251 } 2252 2253 if (desc->type != IWN_BEACON_STATISTICS) 2254 return; /* Reply to a statistics request. */ 2255 2256 sc->noise = iwn_get_noise(&stats->rx.general); 2257 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 2258 2259 /* Test that RSSI and noise are present in stats report. 
*/ 2260 if (le32toh(stats->rx.general.flags) != 1) { 2261 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 2262 "received statistics without RSSI"); 2263 return; 2264 } 2265 2266 if (calib->state == IWN_CALIB_STATE_ASSOC) 2267 iwn_collect_noise(sc, &stats->rx.general); 2268 else if (calib->state == IWN_CALIB_STATE_RUN) 2269 iwn_tune_sensitivity(sc, &stats->rx); 2270 } 2271 2272 /* 2273 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2274 * and 5000 adapters have different incompatible TX status formats. 2275 */ 2276 static void 2277 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2278 struct iwn_rx_data *data) 2279 { 2280 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2281 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2282 2283 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2284 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2285 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2286 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2287 le32toh(stat->status)); 2288 2289 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2290 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff); 2291 } 2292 2293 static void 2294 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2295 struct iwn_rx_data *data) 2296 { 2297 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2298 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2299 2300 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2301 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2302 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2303 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2304 le32toh(stat->status)); 2305 2306 #ifdef notyet 2307 /* Reset TX scheduler slot. */ 2308 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2309 #endif 2310 2311 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2312 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff); 2313 } 2314 2315 /* 2316 * Adapter-independent backend for TX_DONE firmware notifications. 2317 */ 2318 static void 2319 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2320 uint8_t status) 2321 { 2322 struct ifnet *ifp = sc->sc_ifp; 2323 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2324 struct iwn_tx_data *data = &ring->data[desc->idx]; 2325 struct mbuf *m; 2326 struct ieee80211_node *ni; 2327 struct ieee80211vap *vap; 2328 2329 KASSERT(data->ni != NULL, ("no node")); 2330 2331 /* Unmap and free mbuf. */ 2332 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2333 bus_dmamap_unload(ring->data_dmat, data->map); 2334 m = data->m, data->m = NULL; 2335 ni = data->ni, data->ni = NULL; 2336 vap = ni->ni_vap; 2337 2338 if (m->m_flags & M_TXCB) { 2339 /* 2340 * Channels marked for "radar" require traffic to be received 2341 * to unlock before we can transmit. Until traffic is seen 2342 * any attempt to transmit is returned immediately with status 2343 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 2344 * happen on first authenticate after scanning. To workaround 2345 * this we ignore a failure of this sort in AUTH state so the 2346 * 802.11 layer will fall back to using a timeout to wait for 2347 * the AUTH reply. This allows the firmware time to see 2348 * traffic so a subsequent retry of AUTH succeeds. 
It's 2349 * unclear why the firmware does not maintain state for 2350 * channels recently visited as this would allow immediate 2351 * use of the channel after a scan (where we see traffic). 2352 */ 2353 if (status == IWN_TX_FAIL_TX_LOCKED && 2354 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 2355 ieee80211_process_callback(ni, m, 0); 2356 else 2357 ieee80211_process_callback(ni, m, 2358 (status & IWN_TX_FAIL) != 0); 2359 } 2360 2361 /* 2362 * Update rate control statistics for the node. 2363 */ 2364 if (status & 0x80) { 2365 ifp->if_oerrors++; 2366 ieee80211_ratectl_tx_complete(vap, ni, 2367 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2368 } else { 2369 ieee80211_ratectl_tx_complete(vap, ni, 2370 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2371 } 2372 m_freem(m); 2373 ieee80211_free_node(ni); 2374 2375 sc->sc_tx_timer = 0; 2376 if (--ring->queued < IWN_TX_RING_LOMARK) { 2377 sc->qfullmsk &= ~(1 << ring->qid); 2378 if (sc->qfullmsk == 0 && 2379 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2380 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2381 iwn_start_locked(ifp); 2382 } 2383 } 2384 } 2385 2386 /* 2387 * Process a "command done" firmware notification. This is where we wakeup 2388 * processes waiting for a synchronous command completion. 2389 */ 2390 static void 2391 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2392 { 2393 struct iwn_tx_ring *ring = &sc->txq[4]; 2394 struct iwn_tx_data *data; 2395 2396 if ((desc->qid & 0xf) != 4) 2397 return; /* Not a command ack. */ 2398 2399 data = &ring->data[desc->idx]; 2400 2401 /* If the command was mapped in an mbuf, free it. */ 2402 if (data->m != NULL) { 2403 bus_dmamap_unload(ring->data_dmat, data->map); 2404 m_freem(data->m); 2405 data->m = NULL; 2406 } 2407 wakeup(&ring->desc[desc->idx]); 2408 } 2409 2410 /* 2411 * Process an INT_FH_RX or INT_SW_RX interrupt. 2412 */ 2413 static void 2414 iwn_notif_intr(struct iwn_softc *sc) 2415 { 2416 struct ifnet *ifp = sc->sc_ifp; 2417 struct ieee80211com *ic = ifp->if_l2com; 2418 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2419 uint16_t hw; 2420 2421 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 2422 BUS_DMASYNC_POSTREAD); 2423 2424 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2425 while (sc->rxq.cur != hw) { 2426 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2427 struct iwn_rx_desc *desc; 2428 2429 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2430 BUS_DMASYNC_POSTREAD); 2431 desc = mtod(data->m, struct iwn_rx_desc *); 2432 2433 DPRINTF(sc, IWN_DEBUG_RECV, 2434 "%s: qid %x idx %d flags %x type %d(%s) len %d\n", 2435 __func__, desc->qid & 0xf, desc->idx, desc->flags, 2436 desc->type, iwn_intr_str(desc->type), 2437 le16toh(desc->len)); 2438 2439 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2440 iwn_cmd_done(sc, desc); 2441 2442 switch (desc->type) { 2443 case IWN_RX_PHY: 2444 iwn_rx_phy(sc, desc, data); 2445 break; 2446 2447 case IWN_RX_DONE: /* 4965AGN only. */ 2448 case IWN_MPDU_RX_DONE: 2449 /* An 802.11 frame has been received. */ 2450 iwn_rx_done(sc, desc, data); 2451 break; 2452 2453 #if 0 /* HT */ 2454 case IWN_RX_COMPRESSED_BA: 2455 /* A Compressed BlockAck has been received. */ 2456 iwn_rx_compressed_ba(sc, desc, data); 2457 break; 2458 #endif 2459 2460 case IWN_TX_DONE: 2461 /* An 802.11 frame has been transmitted. 
*/ 2462 sc->sc_hal->tx_done(sc, desc, data); 2463 break; 2464 2465 case IWN_RX_STATISTICS: 2466 case IWN_BEACON_STATISTICS: 2467 iwn_rx_statistics(sc, desc, data); 2468 break; 2469 2470 case IWN_BEACON_MISSED: 2471 { 2472 struct iwn_beacon_missed *miss = 2473 (struct iwn_beacon_missed *)(desc + 1); 2474 int misses; 2475 2476 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2477 BUS_DMASYNC_POSTREAD); 2478 misses = le32toh(miss->consecutive); 2479 2480 /* XXX not sure why we're notified w/ zero */ 2481 if (misses == 0) 2482 break; 2483 DPRINTF(sc, IWN_DEBUG_STATE, 2484 "%s: beacons missed %d/%d\n", __func__, 2485 misses, le32toh(miss->total)); 2486 2487 /* 2488 * If more than 5 consecutive beacons are missed, 2489 * reinitialize the sensitivity state machine. 2490 */ 2491 if (vap->iv_state == IEEE80211_S_RUN && misses > 5) 2492 (void) iwn_init_sensitivity(sc); 2493 if (misses >= vap->iv_bmissthreshold) { 2494 IWN_UNLOCK(sc); 2495 ieee80211_beacon_miss(ic); 2496 IWN_LOCK(sc); 2497 } 2498 break; 2499 } 2500 case IWN_UC_READY: 2501 { 2502 struct iwn_ucode_info *uc = 2503 (struct iwn_ucode_info *)(desc + 1); 2504 2505 /* The microcontroller is ready. */ 2506 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2507 BUS_DMASYNC_POSTREAD); 2508 DPRINTF(sc, IWN_DEBUG_RESET, 2509 "microcode alive notification version=%d.%d " 2510 "subtype=%x alive=%x\n", uc->major, uc->minor, 2511 uc->subtype, le32toh(uc->valid)); 2512 2513 if (le32toh(uc->valid) != 1) { 2514 device_printf(sc->sc_dev, 2515 "microcontroller initialization failed"); 2516 break; 2517 } 2518 if (uc->subtype == IWN_UCODE_INIT) { 2519 /* Save microcontroller report. */ 2520 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2521 } 2522 /* Save the address of the error log in SRAM. */ 2523 sc->errptr = le32toh(uc->errptr); 2524 break; 2525 } 2526 case IWN_STATE_CHANGED: 2527 { 2528 uint32_t *status = (uint32_t *)(desc + 1); 2529 2530 /* 2531 * State change allows hardware switch change to be 2532 * noted. However, we handle this in iwn_intr as we 2533 * get both the enable/disble intr. 2534 */ 2535 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2536 BUS_DMASYNC_POSTREAD); 2537 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n", 2538 le32toh(*status)); 2539 break; 2540 } 2541 case IWN_START_SCAN: 2542 { 2543 struct iwn_start_scan *scan = 2544 (struct iwn_start_scan *)(desc + 1); 2545 2546 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2547 BUS_DMASYNC_POSTREAD); 2548 DPRINTF(sc, IWN_DEBUG_ANY, 2549 "%s: scanning channel %d status %x\n", 2550 __func__, scan->chan, le32toh(scan->status)); 2551 break; 2552 } 2553 case IWN_STOP_SCAN: 2554 { 2555 struct iwn_stop_scan *scan = 2556 (struct iwn_stop_scan *)(desc + 1); 2557 2558 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2559 BUS_DMASYNC_POSTREAD); 2560 DPRINTF(sc, IWN_DEBUG_STATE, 2561 "scan finished nchan=%d status=%d chan=%d\n", 2562 scan->nchan, scan->status, scan->chan); 2563 2564 IWN_UNLOCK(sc); 2565 ieee80211_scan_next(vap); 2566 IWN_LOCK(sc); 2567 break; 2568 } 2569 case IWN5000_CALIBRATION_RESULT: 2570 iwn5000_rx_calib_result(sc, desc, data); 2571 break; 2572 2573 case IWN5000_CALIBRATION_DONE: 2574 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 2575 wakeup(sc); 2576 break; 2577 } 2578 2579 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 2580 } 2581 2582 /* Tell the firmware what we have processed. */ 2583 hw = (hw == 0) ? 
IWN_RX_RING_COUNT - 1 : hw - 1; 2584 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 2585 } 2586 2587 /* 2588 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 2589 * from power-down sleep mode. 2590 */ 2591 static void 2592 iwn_wakeup_intr(struct iwn_softc *sc) 2593 { 2594 int qid; 2595 2596 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 2597 __func__); 2598 2599 /* Wakeup RX and TX rings. */ 2600 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 2601 for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) { 2602 struct iwn_tx_ring *ring = &sc->txq[qid]; 2603 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 2604 } 2605 } 2606 2607 static void 2608 iwn_rftoggle_intr(struct iwn_softc *sc) 2609 { 2610 struct ifnet *ifp = sc->sc_ifp; 2611 struct ieee80211com *ic = ifp->if_l2com; 2612 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 2613 2614 IWN_LOCK_ASSERT(sc); 2615 2616 device_printf(sc->sc_dev, "RF switch: radio %s\n", 2617 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 2618 if (tmp & IWN_GP_CNTRL_RFKILL) 2619 ieee80211_runtask(ic, &sc->sc_radioon_task); 2620 else 2621 ieee80211_runtask(ic, &sc->sc_radiooff_task); 2622 } 2623 2624 /* 2625 * Dump the error log of the firmware when a firmware panic occurs. Although 2626 * we can't debug the firmware because it is neither open source nor free, it 2627 * can help us to identify certain classes of problems. 2628 */ 2629 static void 2630 iwn_fatal_intr(struct iwn_softc *sc) 2631 { 2632 const struct iwn_hal *hal = sc->sc_hal; 2633 struct iwn_fw_dump dump; 2634 int i; 2635 2636 IWN_LOCK_ASSERT(sc); 2637 2638 /* Force a complete recalibration on next init. */ 2639 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 2640 2641 /* Check that the error log address is valid. */ 2642 if (sc->errptr < IWN_FW_DATA_BASE || 2643 sc->errptr + sizeof (dump) > 2644 IWN_FW_DATA_BASE + hal->fw_data_maxsz) { 2645 printf("%s: bad firmware error log address 0x%08x\n", 2646 __func__, sc->errptr); 2647 return; 2648 } 2649 if (iwn_nic_lock(sc) != 0) { 2650 printf("%s: could not read firmware error log\n", 2651 __func__); 2652 return; 2653 } 2654 /* Read firmware error log from SRAM. */ 2655 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 2656 sizeof (dump) / sizeof (uint32_t)); 2657 iwn_nic_unlock(sc); 2658 2659 if (dump.valid == 0) { 2660 printf("%s: firmware error log is empty\n", 2661 __func__); 2662 return; 2663 } 2664 printf("firmware error log:\n"); 2665 printf(" error type = \"%s\" (0x%08X)\n", 2666 (dump.id < nitems(iwn_fw_errmsg)) ? 2667 iwn_fw_errmsg[dump.id] : "UNKNOWN", 2668 dump.id); 2669 printf(" program counter = 0x%08X\n", dump.pc); 2670 printf(" source line = 0x%08X\n", dump.src_line); 2671 printf(" error data = 0x%08X%08X\n", 2672 dump.error_data[0], dump.error_data[1]); 2673 printf(" branch link = 0x%08X%08X\n", 2674 dump.branch_link[0], dump.branch_link[1]); 2675 printf(" interrupt link = 0x%08X%08X\n", 2676 dump.interrupt_link[0], dump.interrupt_link[1]); 2677 printf(" time = %u\n", dump.time[0]); 2678 2679 /* Dump driver status (TX and RX rings) while we're here. 
*/ 2680 printf("driver status:\n"); 2681 for (i = 0; i < hal->ntxqs; i++) { 2682 struct iwn_tx_ring *ring = &sc->txq[i]; 2683 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2684 i, ring->qid, ring->cur, ring->queued); 2685 } 2686 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2687 } 2688 2689 static void 2690 iwn_intr(void *arg) 2691 { 2692 struct iwn_softc *sc = arg; 2693 struct ifnet *ifp = sc->sc_ifp; 2694 uint32_t r1, r2, tmp; 2695 2696 IWN_LOCK(sc); 2697 2698 /* Disable interrupts. */ 2699 IWN_WRITE(sc, IWN_INT_MASK, 0); 2700 2701 /* Read interrupts from ICT (fast) or from registers (slow). */ 2702 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2703 tmp = 0; 2704 while (sc->ict[sc->ict_cur] != 0) { 2705 tmp |= sc->ict[sc->ict_cur]; 2706 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 2707 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 2708 } 2709 tmp = le32toh(tmp); 2710 if (tmp == 0xffffffff) /* Shouldn't happen. */ 2711 tmp = 0; 2712 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 2713 tmp |= 0x8000; 2714 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 2715 r2 = 0; /* Unused. */ 2716 } else { 2717 r1 = IWN_READ(sc, IWN_INT); 2718 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) { 2719 /* Hardware gone! Drop the lock before bailing out. */ IWN_UNLOCK(sc); return; } 2720 r2 = IWN_READ(sc, IWN_FH_INT); 2721 } 2722 2723 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2); 2724 2725 if (r1 == 0 && r2 == 0) 2726 goto done; /* Interrupt not for us. */ 2727 2728 /* Acknowledge interrupts. */ 2729 IWN_WRITE(sc, IWN_INT, r1); 2730 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 2731 IWN_WRITE(sc, IWN_FH_INT, r2); 2732 2733 if (r1 & IWN_INT_RF_TOGGLED) { 2734 iwn_rftoggle_intr(sc); 2735 goto done; 2736 } 2737 if (r1 & IWN_INT_CT_REACHED) { 2738 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 2739 __func__); 2740 } 2741 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 2742 iwn_fatal_intr(sc); 2743 ifp->if_flags &= ~IFF_UP; 2744 iwn_stop_locked(sc); 2745 goto done; 2746 } 2747 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 2748 (r2 & IWN_FH_INT_RX)) { 2749 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 2750 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 2751 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 2752 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2753 IWN_INT_PERIODIC_DIS); 2754 iwn_notif_intr(sc); 2755 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 2756 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 2757 IWN_INT_PERIODIC_ENA); 2758 } 2759 } else 2760 iwn_notif_intr(sc); 2761 } 2762 2763 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 2764 if (sc->sc_flags & IWN_FLAG_USE_ICT) 2765 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 2766 wakeup(sc); /* FH DMA transfer completed. */ 2767 } 2768 2769 if (r1 & IWN_INT_ALIVE) 2770 wakeup(sc); /* Firmware is alive. */ 2771 2772 if (r1 & IWN_INT_WAKEUP) 2773 iwn_wakeup_intr(sc); 2774 2775 done: 2776 /* Re-enable interrupts. */ 2777 if (ifp->if_flags & IFF_UP) 2778 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2779 2780 IWN_UNLOCK(sc); 2781 } 2782 2783 /* 2784 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 2785 * 5000 adapters use a slightly different format.)
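 *
 * Both variants store a per-slot length entry that the firmware's TX
 * scheduler reads.  A worked example (sketch only; the frame length, station
 * id and slot index are made up for illustration): a 100-byte frame queued at
 * index 3 for station id 1 would be recorded as
 *
 *	4965: htole16(100 + 8)             = htole16(0x006c)
 *	5000: htole16(1 << 12 | (100 + 8)) = htole16(0x106c)
 *
 * and, because 3 < IWN_SCHED_WINSZ, the same value is mirrored
 * IWN_TX_RING_COUNT entries later so the scheduler window can wrap.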
2786 */ 2787 static void 2788 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2789 uint16_t len) 2790 { 2791 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 2792 2793 *w = htole16(len + 8); 2794 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2795 BUS_DMASYNC_PREWRITE); 2796 if (idx < IWN_SCHED_WINSZ) { 2797 *(w + IWN_TX_RING_COUNT) = *w; 2798 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2799 BUS_DMASYNC_PREWRITE); 2800 } 2801 } 2802 2803 static void 2804 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 2805 uint16_t len) 2806 { 2807 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2808 2809 *w = htole16(id << 12 | (len + 8)); 2810 2811 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2812 BUS_DMASYNC_PREWRITE); 2813 if (idx < IWN_SCHED_WINSZ) { 2814 *(w + IWN_TX_RING_COUNT) = *w; 2815 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2816 BUS_DMASYNC_PREWRITE); 2817 } 2818 } 2819 2820 #ifdef notyet 2821 static void 2822 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 2823 { 2824 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 2825 2826 *w = (*w & htole16(0xf000)) | htole16(1); 2827 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2828 BUS_DMASYNC_PREWRITE); 2829 if (idx < IWN_SCHED_WINSZ) { 2830 *(w + IWN_TX_RING_COUNT) = *w; 2831 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 2832 BUS_DMASYNC_PREWRITE); 2833 } 2834 } 2835 #endif 2836 2837 static uint8_t 2838 iwn_plcp_signal(int rate) { 2839 int i; 2840 2841 for (i = 0; i < IWN_RIDX_MAX + 1; i++) { 2842 if (rate == iwn_rates[i].rate) 2843 return i; 2844 } 2845 2846 return 0; 2847 } 2848 2849 static int 2850 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, 2851 struct iwn_tx_ring *ring) 2852 { 2853 const struct iwn_hal *hal = sc->sc_hal; 2854 const struct ieee80211_txparam *tp; 2855 const struct iwn_rate *rinfo; 2856 struct ieee80211vap *vap = ni->ni_vap; 2857 struct ieee80211com *ic = ni->ni_ic; 2858 struct iwn_node *wn = (void *)ni; 2859 struct iwn_tx_desc *desc; 2860 struct iwn_tx_data *data; 2861 struct iwn_tx_cmd *cmd; 2862 struct iwn_cmd_data *tx; 2863 struct ieee80211_frame *wh; 2864 struct ieee80211_key *k = NULL; 2865 struct mbuf *mnew; 2866 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 2867 uint32_t flags; 2868 u_int hdrlen; 2869 int totlen, error, pad, nsegs = 0, i, rate; 2870 uint8_t ridx, type, txant; 2871 2872 IWN_LOCK_ASSERT(sc); 2873 2874 wh = mtod(m, struct ieee80211_frame *); 2875 hdrlen = ieee80211_anyhdrsize(wh); 2876 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2877 2878 desc = &ring->desc[ring->cur]; 2879 data = &ring->data[ring->cur]; 2880 2881 /* Choose a TX rate index. */ 2882 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 2883 if (type == IEEE80211_FC0_TYPE_MGT) 2884 rate = tp->mgmtrate; 2885 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 2886 rate = tp->mcastrate; 2887 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2888 rate = tp->ucastrate; 2889 else { 2890 /* XXX pass pktlen */ 2891 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2892 rate = ni->ni_txrate; 2893 } 2894 ridx = iwn_plcp_signal(rate); 2895 rinfo = &iwn_rates[ridx]; 2896 2897 /* Encrypt the frame if need be. */ 2898 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 2899 k = ieee80211_crypto_encap(ni, m); 2900 if (k == NULL) { 2901 m_freem(m); 2902 return ENOBUFS; 2903 } 2904 /* Packet header may have moved, reset our local pointer. 
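 * (ieee80211_crypto_encap() may prepend an IV or otherwise shift the mbuf
 * data area, so the cached ieee80211_frame pointer can go stale.)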
*/ 2905 wh = mtod(m, struct ieee80211_frame *); 2906 } 2907 totlen = m->m_pkthdr.len; 2908 2909 if (ieee80211_radiotap_active_vap(vap)) { 2910 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 2911 2912 tap->wt_flags = 0; 2913 tap->wt_rate = rinfo->rate; 2914 if (k != NULL) 2915 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2916 2917 ieee80211_radiotap_tx(vap, m); 2918 } 2919 2920 /* Prepare TX firmware command. */ 2921 cmd = &ring->cmd[ring->cur]; 2922 cmd->code = IWN_CMD_TX_DATA; 2923 cmd->flags = 0; 2924 cmd->qid = ring->qid; 2925 cmd->idx = ring->cur; 2926 2927 tx = (struct iwn_cmd_data *)cmd->data; 2928 /* NB: No need to clear tx, all fields are reinitialized here. */ 2929 tx->scratch = 0; /* clear "scratch" area */ 2930 2931 flags = 0; 2932 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) 2933 flags |= IWN_TX_NEED_ACK; 2934 if ((wh->i_fc[0] & 2935 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 2936 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 2937 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 2938 2939 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2940 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 2941 2942 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2943 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2944 /* NB: Group frames are sent using CCK in 802.11b/g. */ 2945 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2946 flags |= IWN_TX_NEED_RTS; 2947 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2948 ridx >= IWN_RIDX_OFDM6) { 2949 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2950 flags |= IWN_TX_NEED_CTS; 2951 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2952 flags |= IWN_TX_NEED_RTS; 2953 } 2954 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 2955 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 2956 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 2957 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 2958 flags |= IWN_TX_NEED_PROTECTION; 2959 } else 2960 flags |= IWN_TX_FULL_TXOP; 2961 } 2962 } 2963 2964 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 2965 type != IEEE80211_FC0_TYPE_DATA) 2966 tx->id = hal->broadcast_id; 2967 else 2968 tx->id = wn->id; 2969 2970 if (type == IEEE80211_FC0_TYPE_MGT) { 2971 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2972 2973 /* Tell HW to set timestamp in probe responses. */ 2974 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2975 flags |= IWN_TX_INSERT_TSTAMP; 2976 2977 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2978 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2979 tx->timeout = htole16(3); 2980 else 2981 tx->timeout = htole16(2); 2982 } else 2983 tx->timeout = htole16(0); 2984 2985 if (hdrlen & 3) { 2986 /* First segment length must be a multiple of 4. */ 2987 flags |= IWN_TX_NEED_PADDING; 2988 pad = 4 - (hdrlen & 3); 2989 } else 2990 pad = 0; 2991 2992 tx->len = htole16(totlen); 2993 tx->tid = 0; 2994 tx->rts_ntries = 60; 2995 tx->data_ntries = 15; 2996 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 2997 tx->plcp = rinfo->plcp; 2998 tx->rflags = rinfo->flags; 2999 if (tx->id == hal->broadcast_id) { 3000 /* Group or management frame. */ 3001 tx->linkq = 0; 3002 /* XXX Alternate between antenna A and B? */ 3003 txant = IWN_LSB(sc->txchainmask); 3004 tx->rflags |= IWN_RFLAG_ANT(txant); 3005 } else { 3006 tx->linkq = IWN_RIDX_OFDM54 - ridx; 3007 flags |= IWN_TX_LINKQ; /* enable MRR */ 3008 } 3009 3010 /* Set physical address of "scratch area". 
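 * The hardware splits each DMA address into a 32-bit low part and a small
 * high part that is packed into the length field together with the segment
 * length shifted left by four, as done for desc->segs[] below.  A minimal
 * sketch follows, assuming IWN_LOADDR()/IWN_HIADDR() return the low 32 bits
 * and the remaining high bits of a bus address; the address value is made up.
 */
#if 0	/* Example only, never built. */
	{
		bus_addr_t ex_addr = 0x123456780ULL;	/* hypothetical */
		uint32_t ex_lo;
		uint16_t ex_lenfld;

		ex_lo = htole32(IWN_LOADDR(ex_addr));	/* 0x23456780 */
		/* 120-byte segment: high bits | (120 << 4) = 0x0781. */
		ex_lenfld = htole16(IWN_HIADDR(ex_addr) | 120 << 4);
	}
#endif
/*
 * (end of example)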
*/ 3011 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3012 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3013 3014 /* Copy 802.11 header in TX command. */ 3015 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3016 3017 /* Trim 802.11 header. */ 3018 m_adj(m, hdrlen); 3019 tx->security = 0; 3020 tx->flags = htole32(flags); 3021 3022 if (m->m_len > 0) { 3023 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 3024 m, segs, &nsegs, BUS_DMA_NOWAIT); 3025 if (error == EFBIG) { 3026 /* too many fragments, linearize */ 3027 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3028 if (mnew == NULL) { 3029 device_printf(sc->sc_dev, 3030 "%s: could not defrag mbuf\n", __func__); 3031 m_freem(m); 3032 return ENOBUFS; 3033 } 3034 m = mnew; 3035 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, 3036 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); 3037 } 3038 if (error != 0) { 3039 device_printf(sc->sc_dev, 3040 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", 3041 __func__, error); 3042 m_freem(m); 3043 return error; 3044 } 3045 } 3046 3047 data->m = m; 3048 data->ni = ni; 3049 3050 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3051 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3052 3053 /* Fill TX descriptor. */ 3054 desc->nsegs = 1 + nsegs; 3055 /* First DMA segment is used by the TX command. */ 3056 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3057 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3058 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3059 /* Other DMA segments are for data payload. */ 3060 for (i = 1; i <= nsegs; i++) { 3061 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 3062 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 3063 segs[i - 1].ds_len << 4); 3064 } 3065 3066 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3067 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3068 BUS_DMASYNC_PREWRITE); 3069 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3070 BUS_DMASYNC_PREWRITE); 3071 3072 #ifdef notyet 3073 /* Update TX scheduler. */ 3074 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3075 #endif 3076 3077 /* Kick TX ring. */ 3078 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3079 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3080 3081 /* Mark TX ring as full if we reach a certain threshold. */ 3082 if (++ring->queued > IWN_TX_RING_HIMARK) 3083 sc->qfullmsk |= 1 << ring->qid; 3084 3085 return 0; 3086 } 3087 3088 static int 3089 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 3090 struct ieee80211_node *ni, struct iwn_tx_ring *ring, 3091 const struct ieee80211_bpf_params *params) 3092 { 3093 const struct iwn_hal *hal = sc->sc_hal; 3094 const struct iwn_rate *rinfo; 3095 struct ifnet *ifp = sc->sc_ifp; 3096 struct ieee80211vap *vap = ni->ni_vap; 3097 struct ieee80211com *ic = ifp->if_l2com; 3098 struct iwn_tx_cmd *cmd; 3099 struct iwn_cmd_data *tx; 3100 struct ieee80211_frame *wh; 3101 struct iwn_tx_desc *desc; 3102 struct iwn_tx_data *data; 3103 struct mbuf *mnew; 3104 bus_addr_t paddr; 3105 bus_dma_segment_t segs[IWN_MAX_SCATTER]; 3106 uint32_t flags; 3107 u_int hdrlen; 3108 int totlen, error, pad, nsegs = 0, i, rate; 3109 uint8_t ridx, type, txant; 3110 3111 IWN_LOCK_ASSERT(sc); 3112 3113 wh = mtod(m, struct ieee80211_frame *); 3114 hdrlen = ieee80211_anyhdrsize(wh); 3115 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3116 3117 desc = &ring->desc[ring->cur]; 3118 data = &ring->data[ring->cur]; 3119 3120 /* Choose a TX rate index. 
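 * The caller-supplied ibp_rate0 is in net80211's 500 kb/s units (for example
 * 2 for 1 Mb/s CCK or 108 for 54 Mb/s OFDM); iwn_plcp_signal() turns it into
 * the matching iwn_rates[] index.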
*/ 3121 rate = params->ibp_rate0; 3122 if (!ieee80211_isratevalid(ic->ic_rt, rate)) { 3123 /* XXX fall back to mcast/mgmt rate? */ 3124 m_freem(m); 3125 return EINVAL; 3126 } 3127 ridx = iwn_plcp_signal(rate); 3128 rinfo = &iwn_rates[ridx]; 3129 3130 totlen = m->m_pkthdr.len; 3131 3132 /* Prepare TX firmware command. */ 3133 cmd = &ring->cmd[ring->cur]; 3134 cmd->code = IWN_CMD_TX_DATA; 3135 cmd->flags = 0; 3136 cmd->qid = ring->qid; 3137 cmd->idx = ring->cur; 3138 3139 tx = (struct iwn_cmd_data *)cmd->data; 3140 /* NB: No need to clear tx, all fields are reinitialized here. */ 3141 tx->scratch = 0; /* clear "scratch" area */ 3142 3143 flags = 0; 3144 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 3145 flags |= IWN_TX_NEED_ACK; 3146 if (params->ibp_flags & IEEE80211_BPF_RTS) { 3147 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3148 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3149 flags &= ~IWN_TX_NEED_RTS; 3150 flags |= IWN_TX_NEED_PROTECTION; 3151 } else 3152 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 3153 } 3154 if (params->ibp_flags & IEEE80211_BPF_CTS) { 3155 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3156 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3157 flags &= ~IWN_TX_NEED_CTS; 3158 flags |= IWN_TX_NEED_PROTECTION; 3159 } else 3160 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 3161 } 3162 if (type == IEEE80211_FC0_TYPE_MGT) { 3163 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3164 3165 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3166 flags |= IWN_TX_INSERT_TSTAMP; 3167 3168 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3169 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3170 tx->timeout = htole16(3); 3171 else 3172 tx->timeout = htole16(2); 3173 } else 3174 tx->timeout = htole16(0); 3175 3176 if (hdrlen & 3) { 3177 /* First segment length must be a multiple of 4. */ 3178 flags |= IWN_TX_NEED_PADDING; 3179 pad = 4 - (hdrlen & 3); 3180 } else 3181 pad = 0; 3182 3183 if (ieee80211_radiotap_active_vap(vap)) { 3184 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 3185 3186 tap->wt_flags = 0; 3187 tap->wt_rate = rate; 3188 3189 ieee80211_radiotap_tx(vap, m); 3190 } 3191 3192 tx->len = htole16(totlen); 3193 tx->tid = 0; 3194 tx->id = hal->broadcast_id; 3195 tx->rts_ntries = params->ibp_try1; 3196 tx->data_ntries = params->ibp_try0; 3197 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3198 tx->plcp = rinfo->plcp; 3199 tx->rflags = rinfo->flags; 3200 /* Group or management frame. */ 3201 tx->linkq = 0; 3202 txant = IWN_LSB(sc->txchainmask); 3203 tx->rflags |= IWN_RFLAG_ANT(txant); 3204 /* Set physical address of "scratch area". */ 3205 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd); 3206 tx->loaddr = htole32(IWN_LOADDR(paddr)); 3207 tx->hiaddr = IWN_HIADDR(paddr); 3208 3209 /* Copy 802.11 header in TX command. */ 3210 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3211 3212 /* Trim 802.11 header. */ 3213 m_adj(m, hdrlen); 3214 tx->security = 0; 3215 tx->flags = htole32(flags); 3216 3217 if (m->m_len > 0) { 3218 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 3219 m, segs, &nsegs, BUS_DMA_NOWAIT); 3220 if (error == EFBIG) { 3221 /* Too many fragments, linearize. 
*/ 3222 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3223 if (mnew == NULL) { 3224 device_printf(sc->sc_dev, 3225 "%s: could not defrag mbuf\n", __func__); 3226 m_freem(m); 3227 return ENOBUFS; 3228 } 3229 m = mnew; 3230 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, 3231 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); 3232 } 3233 if (error != 0) { 3234 device_printf(sc->sc_dev, 3235 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", 3236 __func__, error); 3237 m_freem(m); 3238 return error; 3239 } 3240 } 3241 3242 data->m = m; 3243 data->ni = ni; 3244 3245 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3246 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3247 3248 /* Fill TX descriptor. */ 3249 desc->nsegs = 1 + nsegs; 3250 /* First DMA segment is used by the TX command. */ 3251 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3252 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3253 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3254 /* Other DMA segments are for data payload. */ 3255 for (i = 1; i <= nsegs; i++) { 3256 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr)); 3257 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) | 3258 segs[i - 1].ds_len << 4); 3259 } 3260 3261 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3262 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3263 BUS_DMASYNC_PREWRITE); 3264 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3265 BUS_DMASYNC_PREWRITE); 3266 3267 #ifdef notyet 3268 /* Update TX scheduler. */ 3269 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3270 #endif 3271 3272 /* Kick TX ring. */ 3273 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3274 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3275 3276 /* Mark TX ring as full if we reach a certain threshold. */ 3277 if (++ring->queued > IWN_TX_RING_HIMARK) 3278 sc->qfullmsk |= 1 << ring->qid; 3279 3280 return 0; 3281 } 3282 3283 static int 3284 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3285 const struct ieee80211_bpf_params *params) 3286 { 3287 struct ieee80211com *ic = ni->ni_ic; 3288 struct ifnet *ifp = ic->ic_ifp; 3289 struct iwn_softc *sc = ifp->if_softc; 3290 struct iwn_tx_ring *txq; 3291 int error = 0; 3292 3293 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3294 ieee80211_free_node(ni); 3295 m_freem(m); 3296 return ENETDOWN; 3297 } 3298 3299 IWN_LOCK(sc); 3300 if (params == NULL) 3301 txq = &sc->txq[M_WME_GETAC(m)]; 3302 else 3303 txq = &sc->txq[params->ibp_pri & 3]; 3304 3305 if (params == NULL) { 3306 /* 3307 * Legacy path; interpret frame contents to decide 3308 * precisely how to send the frame. 3309 */ 3310 error = iwn_tx_data(sc, m, ni, txq); 3311 } else { 3312 /* 3313 * Caller supplied explicit parameters to use in 3314 * sending the frame. 
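 *
 * For reference, a hypothetical caller would fill the parameter block roughly
 * as in the sketch below; the field values are invented for the example and
 * are not taken from net80211 itself.
 */
#if 0	/* Example only, never built. */
	{
		struct ieee80211_bpf_params ex_params;

		memset(&ex_params, 0, sizeof ex_params);
		ex_params.ibp_flags = IEEE80211_BPF_NOACK;
		ex_params.ibp_rate0 = 2;	/* 1 Mb/s, in 500 kb/s units */
		ex_params.ibp_try0 = 1;		/* data retries */
		ex_params.ibp_try1 = 0;		/* RTS retries */
		ex_params.ibp_pri = WME_AC_VO;	/* selects the TX queue above */
		/* The frame and node are then handed to iwn_raw_xmit()
		 * together with &ex_params. */
	}
#endif
/*
 * (end of example)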
3315 */ 3316 error = iwn_tx_data_raw(sc, m, ni, txq, params); 3317 } 3318 if (error != 0) { 3319 /* NB: m is reclaimed on tx failure */ 3320 ieee80211_free_node(ni); 3321 ifp->if_oerrors++; 3322 } 3323 IWN_UNLOCK(sc); 3324 return error; 3325 } 3326 3327 static void 3328 iwn_start(struct ifnet *ifp) 3329 { 3330 struct iwn_softc *sc = ifp->if_softc; 3331 3332 IWN_LOCK(sc); 3333 iwn_start_locked(ifp); 3334 IWN_UNLOCK(sc); 3335 } 3336 3337 static void 3338 iwn_start_locked(struct ifnet *ifp) 3339 { 3340 struct iwn_softc *sc = ifp->if_softc; 3341 struct ieee80211_node *ni; 3342 struct iwn_tx_ring *txq; 3343 struct mbuf *m; 3344 int pri; 3345 3346 IWN_LOCK_ASSERT(sc); 3347 3348 for (;;) { 3349 if (sc->qfullmsk != 0) { 3350 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3351 break; 3352 } 3353 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 3354 if (m == NULL) 3355 break; 3356 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3357 pri = M_WME_GETAC(m); 3358 txq = &sc->txq[pri]; 3359 if (iwn_tx_data(sc, m, ni, txq) != 0) { 3360 ifp->if_oerrors++; 3361 ieee80211_free_node(ni); 3362 break; 3363 } 3364 sc->sc_tx_timer = 5; 3365 } 3366 } 3367 3368 static void 3369 iwn_watchdog(struct iwn_softc *sc) 3370 { 3371 if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { 3372 struct ifnet *ifp = sc->sc_ifp; 3373 struct ieee80211com *ic = ifp->if_l2com; 3374 3375 if_printf(ifp, "device timeout\n"); 3376 ieee80211_runtask(ic, &sc->sc_reinit_task); 3377 } 3378 } 3379 3380 static int 3381 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3382 { 3383 struct iwn_softc *sc = ifp->if_softc; 3384 struct ieee80211com *ic = ifp->if_l2com; 3385 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3386 struct ifreq *ifr = (struct ifreq *) data; 3387 int error = 0, startall = 0, stop = 0; 3388 3389 switch (cmd) { 3390 case SIOCSIFFLAGS: 3391 IWN_LOCK(sc); 3392 if (ifp->if_flags & IFF_UP) { 3393 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3394 iwn_init_locked(sc); 3395 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 3396 startall = 1; 3397 else 3398 stop = 1; 3399 } 3400 } else { 3401 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3402 iwn_stop_locked(sc); 3403 } 3404 IWN_UNLOCK(sc); 3405 if (startall) 3406 ieee80211_start_all(ic); 3407 else if (vap != NULL && stop) 3408 ieee80211_stop(vap); 3409 break; 3410 case SIOCGIFMEDIA: 3411 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3412 break; 3413 case SIOCGIFADDR: 3414 error = ether_ioctl(ifp, cmd, data); 3415 break; 3416 default: 3417 error = EINVAL; 3418 break; 3419 } 3420 return error; 3421 } 3422 3423 /* 3424 * Send a command to the firmware. 3425 */ 3426 static int 3427 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3428 { 3429 struct iwn_tx_ring *ring = &sc->txq[4]; 3430 struct iwn_tx_desc *desc; 3431 struct iwn_tx_data *data; 3432 struct iwn_tx_cmd *cmd; 3433 struct mbuf *m; 3434 bus_addr_t paddr; 3435 int totlen, error; 3436 3437 IWN_LOCK_ASSERT(sc); 3438 3439 desc = &ring->desc[ring->cur]; 3440 data = &ring->data[ring->cur]; 3441 totlen = 4 + size; 3442 3443 if (size > sizeof cmd->data) { 3444 /* Command is too large to fit in a descriptor. 
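 * Oversized commands (anything bigger than the inline cmd->data area, but
 * still at most MCLBYTES) are copied into a freshly allocated jumbo mbuf and
 * DMA-mapped on their own; the usual case below reuses the pre-mapped
 * command slot of the ring instead.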
*/ 3445 if (totlen > MCLBYTES) 3446 return EINVAL; 3447 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3448 if (m == NULL) 3449 return ENOMEM; 3450 cmd = mtod(m, struct iwn_tx_cmd *); 3451 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3452 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3453 if (error != 0) { 3454 m_freem(m); 3455 return error; 3456 } 3457 data->m = m; 3458 } else { 3459 cmd = &ring->cmd[ring->cur]; 3460 paddr = data->cmd_paddr; 3461 } 3462 3463 cmd->code = code; 3464 cmd->flags = 0; 3465 cmd->qid = ring->qid; 3466 cmd->idx = ring->cur; 3467 memcpy(cmd->data, buf, size); 3468 3469 desc->nsegs = 1; 3470 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3471 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3472 3473 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 3474 __func__, iwn_intr_str(cmd->code), cmd->code, 3475 cmd->flags, cmd->qid, cmd->idx); 3476 3477 if (size > sizeof cmd->data) { 3478 bus_dmamap_sync(ring->data_dmat, data->map, 3479 BUS_DMASYNC_PREWRITE); 3480 } else { 3481 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3482 BUS_DMASYNC_PREWRITE); 3483 } 3484 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3485 BUS_DMASYNC_PREWRITE); 3486 3487 #ifdef notyet 3488 /* Update TX scheduler. */ 3489 sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0); 3490 #endif 3491 3492 /* Kick command ring. */ 3493 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3494 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3495 3496 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 3497 } 3498 3499 static int 3500 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3501 { 3502 struct iwn4965_node_info hnode; 3503 caddr_t src, dst; 3504 3505 /* 3506 * We use the node structure for 5000 Series internally (it is 3507 * a superset of the one for 4965AGN). We thus copy the common 3508 * fields before sending the command. 3509 */ 3510 src = (caddr_t)node; 3511 dst = (caddr_t)&hnode; 3512 memcpy(dst, src, 48); 3513 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3514 memcpy(dst + 48, src + 72, 20); 3515 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3516 } 3517 3518 static int 3519 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3520 { 3521 /* Direct mapping. */ 3522 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3523 } 3524 3525 #if 0 /* HT */ 3526 static const uint8_t iwn_ridx_to_plcp[] = { 3527 10, 20, 55, 110, /* CCK */ 3528 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */ 3529 }; 3530 static const uint8_t iwn_siso_mcs_to_plcp[] = { 3531 0, 0, 0, 0, /* CCK */ 3532 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */ 3533 }; 3534 static const uint8_t iwn_mimo_mcs_to_plcp[] = { 3535 0, 0, 0, 0, /* CCK */ 3536 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */ 3537 }; 3538 #endif 3539 static const uint8_t iwn_prev_ridx[] = { 3540 /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */ 3541 0, 0, 1, 5, /* CCK */ 3542 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */ 3543 }; 3544 3545 /* 3546 * Configure hardware link parameters for the specified 3547 * node operating on the specified channel. 
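 *
 * The retry table is filled by walking iwn_prev_ridx[] down from the initial
 * rate index.  Assuming iwn_rates[] is ordered CCK1,2,5.5,11 followed by
 * OFDM6..54 (an assumption made for this illustration), the chain used for
 * the BSS node is roughly
 *
 *	54 -> 48 -> 36 -> 24 -> 18 -> 12 -> 11(CCK) -> 9 -> 6 -> 5.5 -> 2 -> 1
 *
 * which interleaves a CCK fallback into the OFDM ladder, matching the
 * comment on iwn_prev_ridx above.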
3548 */ 3549 static int 3550 iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async) 3551 { 3552 struct ifnet *ifp = sc->sc_ifp; 3553 struct ieee80211com *ic = ifp->if_l2com; 3554 struct iwn_cmd_link_quality linkq; 3555 const struct iwn_rate *rinfo; 3556 int i; 3557 uint8_t txant, ridx; 3558 3559 /* Use the first valid TX antenna. */ 3560 txant = IWN_LSB(sc->txchainmask); 3561 3562 memset(&linkq, 0, sizeof linkq); 3563 linkq.id = id; 3564 linkq.antmsk_1stream = txant; 3565 linkq.antmsk_2stream = IWN_ANT_AB; 3566 linkq.ampdu_max = 31; 3567 linkq.ampdu_threshold = 3; 3568 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3569 3570 #if 0 /* HT */ 3571 if (IEEE80211_IS_CHAN_HT(c)) 3572 linkq.mimo = 1; 3573 #endif 3574 3575 if (id == IWN_ID_BSS) 3576 ridx = IWN_RIDX_OFDM54; 3577 else if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) 3578 ridx = IWN_RIDX_OFDM6; 3579 else 3580 ridx = IWN_RIDX_CCK1; 3581 3582 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 3583 rinfo = &iwn_rates[ridx]; 3584 #if 0 /* HT */ 3585 if (IEEE80211_IS_CHAN_HT40(c)) { 3586 linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx] 3587 | IWN_RIDX_MCS; 3588 linkq.retry[i].rflags = IWN_RFLAG_HT 3589 | IWN_RFLAG_HT40; 3590 /* XXX shortGI */ 3591 } else if (IEEE80211_IS_CHAN_HT(c)) { 3592 linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx] 3593 | IWN_RIDX_MCS; 3594 linkq.retry[i].rflags = IWN_RFLAG_HT; 3595 /* XXX shortGI */ 3596 } else 3597 #endif 3598 { 3599 linkq.retry[i].plcp = rinfo->plcp; 3600 linkq.retry[i].rflags = rinfo->flags; 3601 } 3602 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3603 ridx = iwn_prev_ridx[ridx]; 3604 } 3605 #ifdef IWN_DEBUG 3606 if (sc->sc_debug & IWN_DEBUG_STATE) { 3607 printf("%s: set link quality for node %d, mimo %d ssmask %d\n", 3608 __func__, id, linkq.mimo, linkq.antmsk_1stream); 3609 printf("%s:", __func__); 3610 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) 3611 printf(" %d:%x", linkq.retry[i].plcp, 3612 linkq.retry[i].rflags); 3613 printf("\n"); 3614 } 3615 #endif 3616 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 3617 } 3618 3619 /* 3620 * Broadcast node is used to send group-addressed and management frames. 
3621 */ 3622 static int 3623 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 3624 { 3625 const struct iwn_hal *hal = sc->sc_hal; 3626 struct ifnet *ifp = sc->sc_ifp; 3627 struct iwn_node_info node; 3628 int error; 3629 3630 memset(&node, 0, sizeof node); 3631 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3632 node.id = hal->broadcast_id; 3633 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); 3634 error = hal->add_node(sc, &node, async); 3635 if (error != 0) 3636 return error; 3637 3638 error = iwn_set_link_quality(sc, hal->broadcast_id, async); 3639 return error; 3640 } 3641 3642 static int 3643 iwn_wme_update(struct ieee80211com *ic) 3644 { 3645 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3646 #define IWN_TXOP_TO_US(v) (v<<5) 3647 struct iwn_softc *sc = ic->ic_ifp->if_softc; 3648 struct iwn_edca_params cmd; 3649 int i; 3650 3651 memset(&cmd, 0, sizeof cmd); 3652 cmd.flags = htole32(IWN_EDCA_UPDATE); 3653 for (i = 0; i < WME_NUM_AC; i++) { 3654 const struct wmeParams *wmep = 3655 &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; 3656 cmd.ac[i].aifsn = wmep->wmep_aifsn; 3657 cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin)); 3658 cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax)); 3659 cmd.ac[i].txoplimit = 3660 htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit)); 3661 } 3662 IEEE80211_UNLOCK(ic); 3663 IWN_LOCK(sc); 3664 (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/); 3665 IWN_UNLOCK(sc); 3666 IEEE80211_LOCK(ic); 3667 return 0; 3668 #undef IWN_TXOP_TO_US 3669 #undef IWN_EXP2 3670 } 3671 3672 static void 3673 iwn_update_mcast(struct ifnet *ifp) 3674 { 3675 /* Ignore */ 3676 } 3677 3678 static void 3679 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 3680 { 3681 struct iwn_cmd_led led; 3682 3683 /* Clear microcode LED ownership. */ 3684 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 3685 3686 led.which = which; 3687 led.unit = htole32(10000); /* on/off in unit of 100ms */ 3688 led.off = off; 3689 led.on = on; 3690 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 3691 } 3692 3693 /* 3694 * Set the critical temperature at which the firmware will stop the radio 3695 * and notify us. 3696 */ 3697 static int 3698 iwn_set_critical_temp(struct iwn_softc *sc) 3699 { 3700 struct iwn_critical_temp crit; 3701 int32_t temp; 3702 3703 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 3704 3705 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 3706 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 3707 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3708 temp = IWN_CTOK(110); 3709 else 3710 temp = 110; 3711 memset(&crit, 0, sizeof crit); 3712 crit.tempR = htole32(temp); 3713 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", 3714 temp); 3715 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 3716 } 3717 3718 static int 3719 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 3720 { 3721 struct iwn_cmd_timing cmd; 3722 uint64_t val, mod; 3723 3724 memset(&cmd, 0, sizeof cmd); 3725 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 3726 cmd.bintval = htole16(ni->ni_intval); 3727 cmd.lintval = htole16(10); 3728 3729 /* Compute remaining time until next beacon. 
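 * Worked example (numbers invented for illustration): with a beacon interval
 * of 100 (ni_intval) the period below is 100 * 1024 = 102400 usecs; if the
 * AP timestamp modulo that period is 30000 usecs, then
 * binitval = 102400 - 30000 = 72400 usecs until the next beacon.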
*/ 3730 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ 3731 mod = le64toh(cmd.tstamp) % val; 3732 cmd.binitval = htole32((uint32_t)(val - mod)); 3733 3734 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 3735 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 3736 3737 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 3738 } 3739 3740 static void 3741 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 3742 { 3743 struct ifnet *ifp = sc->sc_ifp; 3744 struct ieee80211com *ic = ifp->if_l2com; 3745 3746 /* Adjust TX power if need be (delta >= 3 degC.) */ 3747 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 3748 __func__, sc->temp, temp); 3749 if (abs(temp - sc->temp) >= 3) { 3750 /* Record temperature of last calibration. */ 3751 sc->temp = temp; 3752 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 3753 } 3754 } 3755 3756 /* 3757 * Set TX power for current channel (each rate has its own power settings). 3758 * This function takes into account the regulatory information from EEPROM, 3759 * the current temperature and the current voltage. 3760 */ 3761 static int 3762 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3763 int async) 3764 { 3765 /* Fixed-point arithmetic division using a n-bit fractional part. */ 3766 #define fdivround(a, b, n) \ 3767 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 3768 /* Linear interpolation. */ 3769 #define interpolate(x, x1, y1, x2, y2, n) \ 3770 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 3771 3772 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 3773 struct ifnet *ifp = sc->sc_ifp; 3774 struct ieee80211com *ic = ifp->if_l2com; 3775 struct iwn_ucode_info *uc = &sc->ucode_info; 3776 struct iwn4965_cmd_txpower cmd; 3777 struct iwn4965_eeprom_chan_samples *chans; 3778 int32_t vdiff, tdiff; 3779 int i, c, grp, maxpwr; 3780 const uint8_t *rf_gain, *dsp_gain; 3781 uint8_t chan; 3782 3783 /* Retrieve channel number. */ 3784 chan = ieee80211_chan2ieee(ic, ch); 3785 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", 3786 chan); 3787 3788 memset(&cmd, 0, sizeof cmd); 3789 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 3790 cmd.chan = chan; 3791 3792 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 3793 maxpwr = sc->maxpwr5GHz; 3794 rf_gain = iwn4965_rf_gain_5ghz; 3795 dsp_gain = iwn4965_dsp_gain_5ghz; 3796 } else { 3797 maxpwr = sc->maxpwr2GHz; 3798 rf_gain = iwn4965_rf_gain_2ghz; 3799 dsp_gain = iwn4965_dsp_gain_2ghz; 3800 } 3801 3802 /* Compute voltage compensation. */ 3803 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 3804 if (vdiff > 0) 3805 vdiff *= 2; 3806 if (abs(vdiff) > 2) 3807 vdiff = 0; 3808 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3809 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 3810 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 3811 3812 /* Get channel attenuation group. */ 3813 if (chan <= 20) /* 1-20 */ 3814 grp = 4; 3815 else if (chan <= 43) /* 34-43 */ 3816 grp = 0; 3817 else if (chan <= 70) /* 44-70 */ 3818 grp = 1; 3819 else if (chan <= 124) /* 71-124 */ 3820 grp = 2; 3821 else /* 125-200 */ 3822 grp = 3; 3823 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3824 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 3825 3826 /* Get channel sub-band. */ 3827 for (i = 0; i < IWN_NBANDS; i++) 3828 if (sc->bands[i].lo != 0 && 3829 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 3830 break; 3831 if (i == IWN_NBANDS) /* Can't happen in real-life. 
*/ 3832 return EINVAL; 3833 chans = sc->bands[i].chans; 3834 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3835 "%s: chan %d sub-band=%d\n", __func__, chan, i); 3836 3837 for (c = 0; c < 2; c++) { 3838 uint8_t power, gain, temp; 3839 int maxchpwr, pwr, ridx, idx; 3840 3841 power = interpolate(chan, 3842 chans[0].num, chans[0].samples[c][1].power, 3843 chans[1].num, chans[1].samples[c][1].power, 1); 3844 gain = interpolate(chan, 3845 chans[0].num, chans[0].samples[c][1].gain, 3846 chans[1].num, chans[1].samples[c][1].gain, 1); 3847 temp = interpolate(chan, 3848 chans[0].num, chans[0].samples[c][1].temp, 3849 chans[1].num, chans[1].samples[c][1].temp, 1); 3850 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3851 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 3852 __func__, c, power, gain, temp); 3853 3854 /* Compute temperature compensation. */ 3855 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 3856 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3857 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 3858 __func__, tdiff, sc->temp, temp); 3859 3860 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 3861 /* Convert dBm to half-dBm. */ 3862 maxchpwr = sc->maxpwr[chan] * 2; 3863 if ((ridx / 8) & 1) 3864 maxchpwr -= 6; /* MIMO 2T: -3dB */ 3865 3866 pwr = maxpwr; 3867 3868 /* Adjust TX power based on rate. */ 3869 if ((ridx % 8) == 5) 3870 pwr -= 15; /* OFDM48: -7.5dB */ 3871 else if ((ridx % 8) == 6) 3872 pwr -= 17; /* OFDM54: -8.5dB */ 3873 else if ((ridx % 8) == 7) 3874 pwr -= 20; /* OFDM60: -10dB */ 3875 else 3876 pwr -= 10; /* Others: -5dB */ 3877 3878 /* Do not exceed channel max TX power. */ 3879 if (pwr > maxchpwr) 3880 pwr = maxchpwr; 3881 3882 idx = gain - (pwr - power) - tdiff - vdiff; 3883 if ((ridx / 8) & 1) /* MIMO */ 3884 idx += (int32_t)le32toh(uc->atten[grp][c]); 3885 3886 if (cmd.band == 0) 3887 idx += 9; /* 5GHz */ 3888 if (ridx == IWN_RIDX_MAX) 3889 idx += 5; /* CCK */ 3890 3891 /* Make sure idx stays in a valid range. */ 3892 if (idx < 0) 3893 idx = 0; 3894 else if (idx > IWN4965_MAX_PWR_INDEX) 3895 idx = IWN4965_MAX_PWR_INDEX; 3896 3897 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3898 "%s: Tx chain %d, rate idx %d: power=%d\n", 3899 __func__, c, ridx, idx); 3900 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 3901 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 3902 } 3903 } 3904 3905 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 3906 "%s: set tx power for chan %d\n", __func__, chan); 3907 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 3908 3909 #undef interpolate 3910 #undef fdivround 3911 } 3912 3913 static int 3914 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 3915 int async) 3916 { 3917 struct iwn5000_cmd_txpower cmd; 3918 3919 /* 3920 * TX power calibration is handled automatically by the firmware 3921 * for 5000 Series. 3922 */ 3923 memset(&cmd, 0, sizeof cmd); 3924 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 3925 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 3926 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 3927 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); 3928 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 3929 } 3930 3931 /* 3932 * Retrieve the maximum RSSI (in dBm) among receivers. 
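 * The per-antenna values reported in the PHY statistics appear to be
 * relative to the AGC reading; subtracting the AGC value and the fixed
 * IWN_RSSI_TO_DBM offset converts the best of them to dBm.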
3933 */ 3934 static int 3935 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 3936 { 3937 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 3938 uint8_t mask, agc; 3939 int rssi; 3940 3941 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 3942 agc = (le16toh(phy->agc) >> 7) & 0x7f; 3943 3944 rssi = 0; 3945 #if 0 3946 if (mask & IWN_ANT_A) /* Ant A */ 3947 rssi = max(rssi, phy->rssi[0]); 3948 if (mask & IWN_ATH_B) /* Ant B */ 3949 rssi = max(rssi, phy->rssi[2]); 3950 if (mask & IWN_ANT_C) /* Ant C */ 3951 rssi = max(rssi, phy->rssi[4]); 3952 #else 3953 rssi = max(rssi, phy->rssi[0]); 3954 rssi = max(rssi, phy->rssi[2]); 3955 rssi = max(rssi, phy->rssi[4]); 3956 #endif 3957 3958 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d " 3959 "result %d\n", __func__, agc, mask, 3960 phy->rssi[0], phy->rssi[2], phy->rssi[4], 3961 rssi - agc - IWN_RSSI_TO_DBM); 3962 return rssi - agc - IWN_RSSI_TO_DBM; 3963 } 3964 3965 static int 3966 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 3967 { 3968 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 3969 int rssi; 3970 uint8_t agc; 3971 3972 agc = (le32toh(phy->agc) >> 9) & 0x7f; 3973 3974 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 3975 le16toh(phy->rssi[1]) & 0xff); 3976 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 3977 3978 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d " 3979 "result %d\n", __func__, agc, 3980 phy->rssi[0], phy->rssi[1], phy->rssi[2], 3981 rssi - agc - IWN_RSSI_TO_DBM); 3982 return rssi - agc - IWN_RSSI_TO_DBM; 3983 } 3984 3985 /* 3986 * Retrieve the average noise (in dBm) among receivers. 3987 */ 3988 static int 3989 iwn_get_noise(const struct iwn_rx_general_stats *stats) 3990 { 3991 int i, total, nbant, noise; 3992 3993 total = nbant = 0; 3994 for (i = 0; i < 3; i++) { 3995 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 3996 continue; 3997 total += noise; 3998 nbant++; 3999 } 4000 /* There should be at least one antenna but check anyway. */ 4001 return (nbant == 0) ? -127 : (total / nbant) - 107; 4002 } 4003 4004 /* 4005 * Compute temperature (in degC) from last received statistics. 4006 */ 4007 static int 4008 iwn4965_get_temperature(struct iwn_softc *sc) 4009 { 4010 struct iwn_ucode_info *uc = &sc->ucode_info; 4011 int32_t r1, r2, r3, r4, temp; 4012 4013 r1 = le32toh(uc->temp[0].chan20MHz); 4014 r2 = le32toh(uc->temp[1].chan20MHz); 4015 r3 = le32toh(uc->temp[2].chan20MHz); 4016 r4 = le32toh(sc->rawtemp); 4017 4018 if (r1 == r3) /* Prevents division by 0 (should not happen.) */ 4019 return 0; 4020 4021 /* Sign-extend 23-bit R4 value to 32-bit. */ 4022 r4 = (r4 << 8) >> 8; 4023 /* Compute temperature in Kelvin. */ 4024 temp = (259 * (r4 - r2)) / (r3 - r1); 4025 temp = (temp * 97) / 100 + 8; 4026 4027 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, 4028 IWN_KTOC(temp)); 4029 return IWN_KTOC(temp); 4030 } 4031 4032 static int 4033 iwn5000_get_temperature(struct iwn_softc *sc) 4034 { 4035 int32_t temp; 4036 4037 /* 4038 * Temperature is not used by the driver for 5000 Series because 4039 * TX power calibration is handled by firmware. We export it to 4040 * users through the sensor framework though. 4041 */ 4042 temp = le32toh(sc->rawtemp); 4043 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 4044 temp = (temp / -5) + sc->temp_off; 4045 temp = IWN_KTOC(temp); 4046 } 4047 return temp; 4048 } 4049 4050 /* 4051 * Initialize sensitivity calibration state machine. 
4052 */ 4053 static int 4054 iwn_init_sensitivity(struct iwn_softc *sc) 4055 { 4056 const struct iwn_hal *hal = sc->sc_hal; 4057 struct iwn_calib_state *calib = &sc->calib; 4058 uint32_t flags; 4059 int error; 4060 4061 /* Reset calibration state machine. */ 4062 memset(calib, 0, sizeof (*calib)); 4063 calib->state = IWN_CALIB_STATE_INIT; 4064 calib->cck_state = IWN_CCK_STATE_HIFA; 4065 /* Set initial correlation values. */ 4066 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 4067 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 4068 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 4069 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 4070 calib->cck_x4 = 125; 4071 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 4072 calib->energy_cck = sc->limits->energy_cck; 4073 4074 /* Write initial sensitivity. */ 4075 error = iwn_send_sensitivity(sc); 4076 if (error != 0) 4077 return error; 4078 4079 /* Write initial gains. */ 4080 error = hal->init_gains(sc); 4081 if (error != 0) 4082 return error; 4083 4084 /* Request statistics at each beacon interval. */ 4085 flags = 0; 4086 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__); 4087 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 4088 } 4089 4090 /* 4091 * Collect noise and RSSI statistics for the first 20 beacons received 4092 * after association and use them to determine connected antennas and 4093 * to set differential gains. 4094 */ 4095 static void 4096 iwn_collect_noise(struct iwn_softc *sc, 4097 const struct iwn_rx_general_stats *stats) 4098 { 4099 const struct iwn_hal *hal = sc->sc_hal; 4100 struct iwn_calib_state *calib = &sc->calib; 4101 uint32_t val; 4102 int i; 4103 4104 /* Accumulate RSSI and noise for all 3 antennas. */ 4105 for (i = 0; i < 3; i++) { 4106 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 4107 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 4108 } 4109 /* NB: We update differential gains only once after 20 beacons. */ 4110 if (++calib->nbeacons < 20) 4111 return; 4112 4113 /* Determine highest average RSSI. */ 4114 val = MAX(calib->rssi[0], calib->rssi[1]); 4115 val = MAX(calib->rssi[2], val); 4116 4117 /* Determine which antennas are connected. */ 4118 sc->chainmask = sc->rxchainmask; 4119 for (i = 0; i < 3; i++) 4120 if (val - calib->rssi[i] > 15 * 20) 4121 sc->chainmask &= ~(1 << i); 4122 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4123 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", 4124 __func__, sc->rxchainmask, sc->chainmask); 4125 4126 /* If none of the TX antennas are connected, keep at least one. */ 4127 if ((sc->chainmask & sc->txchainmask) == 0) 4128 sc->chainmask |= IWN_LSB(sc->txchainmask); 4129 4130 (void)hal->set_gains(sc); 4131 calib->state = IWN_CALIB_STATE_RUN; 4132 4133 #ifdef notyet 4134 /* XXX Disable RX chains with no antennas connected. */ 4135 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 4136 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4137 #endif 4138 4139 #if 0 4140 /* XXX: not yet */ 4141 /* Enable power-saving mode if requested by user. */ 4142 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 4143 (void)iwn_set_pslevel(sc, 0, 3, 1); 4144 #endif 4145 } 4146 4147 static int 4148 iwn4965_init_gains(struct iwn_softc *sc) 4149 { 4150 struct iwn_phy_calib_gain cmd; 4151 4152 memset(&cmd, 0, sizeof cmd); 4153 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4154 /* Differential gains initially set to 0 for all 3 antennas. 
*/ 4155 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4156 "%s: setting initial differential gains\n", __func__); 4157 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4158 } 4159 4160 static int 4161 iwn5000_init_gains(struct iwn_softc *sc) 4162 { 4163 struct iwn_phy_calib cmd; 4164 4165 memset(&cmd, 0, sizeof cmd); 4166 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 4167 cmd.ngroups = 1; 4168 cmd.isvalid = 1; 4169 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4170 "%s: setting initial differential gains\n", __func__); 4171 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4172 } 4173 4174 static int 4175 iwn4965_set_gains(struct iwn_softc *sc) 4176 { 4177 struct iwn_calib_state *calib = &sc->calib; 4178 struct iwn_phy_calib_gain cmd; 4179 int i, delta, noise; 4180 4181 /* Get minimal noise among connected antennas. */ 4182 noise = INT_MAX; /* NB: There's at least one antenna. */ 4183 for (i = 0; i < 3; i++) 4184 if (sc->chainmask & (1 << i)) 4185 noise = MIN(calib->noise[i], noise); 4186 4187 memset(&cmd, 0, sizeof cmd); 4188 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4189 /* Set differential gains for connected antennas. */ 4190 for (i = 0; i < 3; i++) { 4191 if (sc->chainmask & (1 << i)) { 4192 /* Compute attenuation (in unit of 1.5dB). */ 4193 delta = (noise - (int32_t)calib->noise[i]) / 30; 4194 /* NB: delta <= 0 */ 4195 /* Limit to [-4.5dB,0]. */ 4196 cmd.gain[i] = MIN(abs(delta), 3); 4197 if (delta < 0) 4198 cmd.gain[i] |= 1 << 2; /* sign bit */ 4199 } 4200 } 4201 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4202 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4203 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); 4204 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4205 } 4206 4207 static int 4208 iwn5000_set_gains(struct iwn_softc *sc) 4209 { 4210 struct iwn_calib_state *calib = &sc->calib; 4211 struct iwn_phy_calib_gain cmd; 4212 int i, ant, delta, div; 4213 4214 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4215 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4216 4217 memset(&cmd, 0, sizeof cmd); 4218 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN; 4219 cmd.ngroups = 1; 4220 cmd.isvalid = 1; 4221 /* Get first available RX antenna as referential. */ 4222 ant = IWN_LSB(sc->rxchainmask); 4223 /* Set differential gains for other antennas. */ 4224 for (i = ant + 1; i < 3; i++) { 4225 if (sc->chainmask & (1 << i)) { 4226 /* The delta is relative to antenna "ant". */ 4227 delta = ((int32_t)calib->noise[ant] - 4228 (int32_t)calib->noise[i]) / div; 4229 /* Limit to [-4.5dB,+4.5dB]. */ 4230 cmd.gain[i - 1] = MIN(abs(delta), 3); 4231 if (delta < 0) 4232 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4233 } 4234 } 4235 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4236 "setting differential gains Ant B/C: %x/%x (%x)\n", 4237 cmd.gain[0], cmd.gain[1], sc->chainmask); 4238 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4239 } 4240 4241 /* 4242 * Tune RF RX sensitivity based on the number of false alarms detected 4243 * during the last beacon period. 
4244 */ 4245 static void 4246 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4247 { 4248 #define inc(val, inc, max) \ 4249 if ((val) < (max)) { \ 4250 if ((val) < (max) - (inc)) \ 4251 (val) += (inc); \ 4252 else \ 4253 (val) = (max); \ 4254 needs_update = 1; \ 4255 } 4256 #define dec(val, dec, min) \ 4257 if ((val) > (min)) { \ 4258 if ((val) > (min) + (dec)) \ 4259 (val) -= (dec); \ 4260 else \ 4261 (val) = (min); \ 4262 needs_update = 1; \ 4263 } 4264 4265 const struct iwn_sensitivity_limits *limits = sc->limits; 4266 struct iwn_calib_state *calib = &sc->calib; 4267 uint32_t val, rxena, fa; 4268 uint32_t energy[3], energy_min; 4269 uint8_t noise[3], noise_ref; 4270 int i, needs_update = 0; 4271 4272 /* Check that we've been enabled long enough. */ 4273 rxena = le32toh(stats->general.load); 4274 if (rxena == 0) 4275 return; 4276 4277 /* Compute number of false alarms since last call for OFDM. */ 4278 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4279 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 4280 fa *= 200 * 1024; /* 200TU */ 4281 4282 /* Save counters values for next call. */ 4283 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 4284 calib->fa_ofdm = le32toh(stats->ofdm.fa); 4285 4286 if (fa > 50 * rxena) { 4287 /* High false alarm count, decrease sensitivity. */ 4288 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4289 "%s: OFDM high false alarm count: %u\n", __func__, fa); 4290 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4291 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4292 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4293 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4294 4295 } else if (fa < 5 * rxena) { 4296 /* Low false alarm count, increase sensitivity. */ 4297 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4298 "%s: OFDM low false alarm count: %u\n", __func__, fa); 4299 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4300 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4301 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4302 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4303 } 4304 4305 /* Compute maximum noise among 3 receivers. */ 4306 for (i = 0; i < 3; i++) 4307 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 4308 val = MAX(noise[0], noise[1]); 4309 val = MAX(noise[2], val); 4310 /* Insert it into our samples table. */ 4311 calib->noise_samples[calib->cur_noise_sample] = val; 4312 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4313 4314 /* Compute maximum noise among last 20 samples. */ 4315 noise_ref = calib->noise_samples[0]; 4316 for (i = 1; i < 20; i++) 4317 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4318 4319 /* Compute maximum energy among 3 receivers. */ 4320 for (i = 0; i < 3; i++) 4321 energy[i] = le32toh(stats->general.energy[i]); 4322 val = MIN(energy[0], energy[1]); 4323 val = MIN(energy[2], val); 4324 /* Insert it into our samples table. */ 4325 calib->energy_samples[calib->cur_energy_sample] = val; 4326 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4327 4328 /* Compute minimum energy among last 10 samples. */ 4329 energy_min = calib->energy_samples[0]; 4330 for (i = 1; i < 10; i++) 4331 energy_min = MAX(energy_min, calib->energy_samples[i]); 4332 energy_min += 6; 4333 4334 /* Compute number of false alarms since last call for CCK. */ 4335 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4336 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4337 fa *= 200 * 1024; /* 200TU */ 4338 4339 /* Save counters values for next call. 
*/ 4340 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4341 calib->fa_cck = le32toh(stats->cck.fa); 4342 4343 if (fa > 50 * rxena) { 4344 /* High false alarm count, decrease sensitivity. */ 4345 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4346 "%s: CCK high false alarm count: %u\n", __func__, fa); 4347 calib->cck_state = IWN_CCK_STATE_HIFA; 4348 calib->low_fa = 0; 4349 4350 if (calib->cck_x4 > 160) { 4351 calib->noise_ref = noise_ref; 4352 if (calib->energy_cck > 2) 4353 dec(calib->energy_cck, 2, energy_min); 4354 } 4355 if (calib->cck_x4 < 160) { 4356 calib->cck_x4 = 161; 4357 needs_update = 1; 4358 } else 4359 inc(calib->cck_x4, 3, limits->max_cck_x4); 4360 4361 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4362 4363 } else if (fa < 5 * rxena) { 4364 /* Low false alarm count, increase sensitivity. */ 4365 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4366 "%s: CCK low false alarm count: %u\n", __func__, fa); 4367 calib->cck_state = IWN_CCK_STATE_LOFA; 4368 calib->low_fa++; 4369 4370 if (calib->cck_state != IWN_CCK_STATE_INIT && 4371 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4372 calib->low_fa > 100)) { 4373 inc(calib->energy_cck, 2, limits->min_energy_cck); 4374 dec(calib->cck_x4, 3, limits->min_cck_x4); 4375 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4376 } 4377 } else { 4378 /* Not worth to increase or decrease sensitivity. */ 4379 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4380 "%s: CCK normal false alarm count: %u\n", __func__, fa); 4381 calib->low_fa = 0; 4382 calib->noise_ref = noise_ref; 4383 4384 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4385 /* Previous interval had many false alarms. */ 4386 dec(calib->energy_cck, 8, energy_min); 4387 } 4388 calib->cck_state = IWN_CCK_STATE_INIT; 4389 } 4390 4391 if (needs_update) 4392 (void)iwn_send_sensitivity(sc); 4393 #undef dec 4394 #undef inc 4395 } 4396 4397 static int 4398 iwn_send_sensitivity(struct iwn_softc *sc) 4399 { 4400 struct iwn_calib_state *calib = &sc->calib; 4401 struct iwn_sensitivity_cmd cmd; 4402 4403 memset(&cmd, 0, sizeof cmd); 4404 cmd.which = IWN_SENSITIVITY_WORKTBL; 4405 /* OFDM modulation. */ 4406 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4407 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4408 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4409 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4410 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4411 cmd.energy_ofdm_th = htole16(62); 4412 /* CCK modulation. */ 4413 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4414 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4415 cmd.energy_cck = htole16(calib->energy_cck); 4416 /* Barker modulation: use default values. */ 4417 cmd.corr_barker = htole16(190); 4418 cmd.corr_barker_mrc = htole16(390); 4419 4420 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4421 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 4422 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4423 calib->ofdm_mrc_x4, calib->cck_x4, 4424 calib->cck_mrc_x4, calib->energy_cck); 4425 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1); 4426 } 4427 4428 /* 4429 * Set STA mode power saving level (between 0 and 5). 4430 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4431 */ 4432 static int 4433 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4434 { 4435 const struct iwn_pmgt *pmgt; 4436 struct iwn_pmgt_cmd cmd; 4437 uint32_t max, skip_dtim; 4438 uint32_t tmp; 4439 int i; 4440 4441 /* Select which PS parameters to use. 
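 * The iwn_pmgt table is indexed first by DTIM range (<=2, <=10, >10) and
 * then by the requested power-save level.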
*/ 4442 if (dtim <= 2) 4443 pmgt = &iwn_pmgt[0][level]; 4444 else if (dtim <= 10) 4445 pmgt = &iwn_pmgt[1][level]; 4446 else 4447 pmgt = &iwn_pmgt[2][level]; 4448 4449 memset(&cmd, 0, sizeof cmd); 4450 if (level != 0) /* not CAM */ 4451 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4452 if (level == 5) 4453 cmd.flags |= htole16(IWN_PS_FAST_PD); 4454 /* Retrieve PCIe Active State Power Management (ASPM). */ 4455 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 4456 if (!(tmp & 0x1)) /* L0s Entry disabled. */ 4457 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4458 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4459 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4460 4461 if (dtim == 0) { 4462 dtim = 1; 4463 skip_dtim = 0; 4464 } else 4465 skip_dtim = pmgt->skip_dtim; 4466 if (skip_dtim != 0) { 4467 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4468 max = pmgt->intval[4]; 4469 if (max == (uint32_t)-1) 4470 max = dtim * (skip_dtim + 1); 4471 else if (max > dtim) 4472 max = (max / dtim) * dtim; 4473 } else 4474 max = dtim; 4475 for (i = 0; i < 5; i++) 4476 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 4477 4478 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 4479 level); 4480 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4481 } 4482 4483 static int 4484 iwn_config(struct iwn_softc *sc) 4485 { 4486 const struct iwn_hal *hal = sc->sc_hal; 4487 struct ifnet *ifp = sc->sc_ifp; 4488 struct ieee80211com *ic = ifp->if_l2com; 4489 struct iwn_bluetooth bluetooth; 4490 uint32_t txmask; 4491 int error; 4492 uint16_t rxchain; 4493 4494 /* Configure valid TX chains for 5000 Series. */ 4495 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4496 txmask = htole32(sc->txchainmask); 4497 DPRINTF(sc, IWN_DEBUG_RESET, 4498 "%s: configuring valid TX chains 0x%x\n", __func__, txmask); 4499 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4500 sizeof txmask, 0); 4501 if (error != 0) { 4502 device_printf(sc->sc_dev, 4503 "%s: could not configure valid TX chains, " 4504 "error %d\n", __func__, error); 4505 return error; 4506 } 4507 } 4508 4509 /* Configure bluetooth coexistence. */ 4510 memset(&bluetooth, 0, sizeof bluetooth); 4511 bluetooth.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 4512 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF; 4513 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF; 4514 DPRINTF(sc, IWN_DEBUG_RESET, "%s: config bluetooth coexistence\n", 4515 __func__); 4516 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0); 4517 if (error != 0) { 4518 device_printf(sc->sc_dev, 4519 "%s: could not configure bluetooth coexistence, error %d\n", 4520 __func__, error); 4521 return error; 4522 } 4523 4524 /* Set mode, channel, RX filter and enable RX. */ 4525 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 4526 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp)); 4527 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp)); 4528 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 4529 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4530 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 4531 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4532 switch (ic->ic_opmode) { 4533 case IEEE80211_M_STA: 4534 sc->rxon.mode = IWN_MODE_STA; 4535 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 4536 break; 4537 case IEEE80211_M_MONITOR: 4538 sc->rxon.mode = IWN_MODE_MONITOR; 4539 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 4540 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 4541 break; 4542 default: 4543 /* Should not get there. 
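 * Only STA and MONITOR operating modes are handled above; other opmodes
 * are presumably rejected earlier, at vap creation time.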
*/ 4544 break; 4545 } 4546 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 4547 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 4548 sc->rxon.ht_single_mask = 0xff; 4549 sc->rxon.ht_dual_mask = 0xff; 4550 sc->rxon.ht_triple_mask = 0xff; 4551 rxchain = 4552 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4553 IWN_RXCHAIN_MIMO_COUNT(2) | 4554 IWN_RXCHAIN_IDLE_COUNT(2); 4555 sc->rxon.rxchain = htole16(rxchain); 4556 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); 4557 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 0); 4558 if (error != 0) { 4559 device_printf(sc->sc_dev, 4560 "%s: RXON command failed\n", __func__); 4561 return error; 4562 } 4563 4564 error = iwn_add_broadcast_node(sc, 0); 4565 if (error != 0) { 4566 device_printf(sc->sc_dev, 4567 "%s: could not add broadcast node\n", __func__); 4568 return error; 4569 } 4570 4571 /* Configuration has changed, set TX power accordingly. */ 4572 error = hal->set_txpower(sc, ic->ic_curchan, 0); 4573 if (error != 0) { 4574 device_printf(sc->sc_dev, 4575 "%s: could not set TX power\n", __func__); 4576 return error; 4577 } 4578 4579 error = iwn_set_critical_temp(sc); 4580 if (error != 0) { 4581 device_printf(sc->sc_dev, 4582 "%s: could not set critical temperature\n", __func__); 4583 return error; 4584 } 4585 4586 /* Set power saving level to CAM during initialization. */ 4587 error = iwn_set_pslevel(sc, 0, 0, 0); 4588 if (error != 0) { 4589 device_printf(sc->sc_dev, 4590 "%s: could not set power saving level\n", __func__); 4591 return error; 4592 } 4593 return 0; 4594 } 4595 4596 static int 4597 iwn_scan(struct iwn_softc *sc) 4598 { 4599 struct ifnet *ifp = sc->sc_ifp; 4600 struct ieee80211com *ic = ifp->if_l2com; 4601 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/ 4602 struct iwn_scan_hdr *hdr; 4603 struct iwn_cmd_data *tx; 4604 struct iwn_scan_essid *essid; 4605 struct iwn_scan_chan *chan; 4606 struct ieee80211_frame *wh; 4607 struct ieee80211_rateset *rs; 4608 struct ieee80211_channel *c; 4609 int buflen, error, nrates; 4610 uint16_t rxchain; 4611 uint8_t *buf, *frm, txant; 4612 4613 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 4614 if (buf == NULL) { 4615 device_printf(sc->sc_dev, 4616 "%s: could not allocate buffer for scan command\n", 4617 __func__); 4618 return ENOMEM; 4619 } 4620 hdr = (struct iwn_scan_hdr *)buf; 4621 4622 /* 4623 * Move to the next channel if no frames are received within 10ms 4624 * after sending the probe request. 4625 */ 4626 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 4627 hdr->quiet_threshold = htole16(1); /* min # of packets */ 4628 4629 /* Select antennas for scanning. */ 4630 rxchain = 4631 IWN_RXCHAIN_VALID(sc->rxchainmask) | 4632 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 4633 IWN_RXCHAIN_DRIVER_FORCE; 4634 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) && 4635 sc->hw_type == IWN_HW_REV_TYPE_4965) { 4636 /* Ant A must be avoided in 5GHz because of an HW bug. */ 4637 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC); 4638 } else /* Use all available RX antennas. */ 4639 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 4640 hdr->rxchain = htole16(rxchain); 4641 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 4642 4643 tx = (struct iwn_cmd_data *)(hdr + 1); 4644 tx->flags = htole32(IWN_TX_AUTO_SEQ); 4645 tx->id = sc->sc_hal->broadcast_id; 4646 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4647 4648 if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) { 4649 /* Send probe requests at 6Mbps.
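 * (6 Mb/s is the lowest mandatory OFDM rate, so any 11a station can
 * receive the probe request.)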
*/ 4650 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 4651 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 4652 } else { 4653 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 4654 /* Send probe requests at 1Mbps. */ 4655 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 4656 tx->rflags = IWN_RFLAG_CCK; 4657 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 4658 } 4659 /* Use the first valid TX antenna. */ 4660 txant = IWN_LSB(sc->txchainmask); 4661 tx->rflags |= IWN_RFLAG_ANT(txant); 4662 4663 essid = (struct iwn_scan_essid *)(tx + 1); 4664 if (ss->ss_ssid[0].len != 0) { 4665 essid[0].id = IEEE80211_ELEMID_SSID; 4666 essid[0].len = ss->ss_ssid[0].len; 4667 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 4668 } 4669 4670 /* 4671 * Build a probe request frame. Most of the following code is a 4672 * copy & paste of what is done in net80211. 4673 */ 4674 wh = (struct ieee80211_frame *)(essid + 20); 4675 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 4676 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 4677 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 4678 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 4679 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 4680 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 4681 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 4682 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 4683 4684 frm = (uint8_t *)(wh + 1); 4685 4686 /* Add SSID IE. */ 4687 *frm++ = IEEE80211_ELEMID_SSID; 4688 *frm++ = ss->ss_ssid[0].len; 4689 memcpy(frm, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 4690 frm += ss->ss_ssid[0].len; 4691 4692 /* Add supported rates IE. */ 4693 *frm++ = IEEE80211_ELEMID_RATES; 4694 nrates = rs->rs_nrates; 4695 if (nrates > IEEE80211_RATE_SIZE) 4696 nrates = IEEE80211_RATE_SIZE; 4697 *frm++ = nrates; 4698 memcpy(frm, rs->rs_rates, nrates); 4699 frm += nrates; 4700 4701 /* Add supported xrates IE. */ 4702 if (rs->rs_nrates > IEEE80211_RATE_SIZE) { 4703 nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; 4704 *frm++ = IEEE80211_ELEMID_XRATES; 4705 *frm++ = (uint8_t)nrates; 4706 memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); 4707 frm += nrates; 4708 } 4709 4710 /* Set length of probe request. 
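 * i.e. the number of bytes from the start of the 802.11 header up to the
 * end of the last information element appended above.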
*/ 4711 tx->len = htole16(frm - (uint8_t *)wh); 4712 4713 c = ic->ic_curchan; 4714 chan = (struct iwn_scan_chan *)frm; 4715 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 4716 chan->flags = 0; 4717 if (ss->ss_nssid > 0) 4718 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 4719 chan->dsp_gain = 0x6e; 4720 if (IEEE80211_IS_CHAN_5GHZ(c) && 4721 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 4722 chan->rf_gain = 0x3b; 4723 chan->active = htole16(24); 4724 chan->passive = htole16(110); 4725 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4726 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 4727 chan->rf_gain = 0x3b; 4728 chan->active = htole16(24); 4729 if (sc->rxon.associd) 4730 chan->passive = htole16(78); 4731 else 4732 chan->passive = htole16(110); 4733 hdr->crc_threshold = 0xffff; 4734 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 4735 chan->rf_gain = 0x28; 4736 chan->active = htole16(36); 4737 chan->passive = htole16(120); 4738 chan->flags |= htole32(IWN_CHAN_ACTIVE); 4739 } else { 4740 chan->rf_gain = 0x28; 4741 chan->active = htole16(36); 4742 if (sc->rxon.associd) 4743 chan->passive = htole16(88); 4744 else 4745 chan->passive = htole16(120); 4746 hdr->crc_threshold = 0xffff; 4747 } 4748 4749 DPRINTF(sc, IWN_DEBUG_STATE, 4750 "%s: chan %u flags 0x%x rf_gain 0x%x " 4751 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__, 4752 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 4753 chan->active, chan->passive); 4754 4755 hdr->nchan++; 4756 chan++; 4757 buflen = (uint8_t *)chan - buf; 4758 hdr->len = htole16(buflen); 4759 4760 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 4761 hdr->nchan); 4762 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 4763 free(buf, M_DEVBUF); 4764 return error; 4765 } 4766 4767 static int 4768 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 4769 { 4770 const struct iwn_hal *hal = sc->sc_hal; 4771 struct ifnet *ifp = sc->sc_ifp; 4772 struct ieee80211com *ic = ifp->if_l2com; 4773 struct ieee80211_node *ni = vap->iv_bss; 4774 int error; 4775 4776 sc->calib.state = IWN_CALIB_STATE_INIT; 4777 4778 /* Update adapter configuration. 
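 * for the AUTH state: program the BSSID, channel/band flags and the legacy
 * CCK/OFDM rate masks into the RXON structure, then push it to the firmware.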
*/ 4779 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4780 sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); 4781 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4782 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4783 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4784 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4785 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4786 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4787 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4788 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4789 sc->rxon.cck_mask = 0; 4790 sc->rxon.ofdm_mask = 0x15; 4791 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4792 sc->rxon.cck_mask = 0x03; 4793 sc->rxon.ofdm_mask = 0; 4794 } else { 4795 /* XXX assume 802.11b/g */ 4796 sc->rxon.cck_mask = 0x0f; 4797 sc->rxon.ofdm_mask = 0x15; 4798 } 4799 DPRINTF(sc, IWN_DEBUG_STATE, 4800 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4801 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4802 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4803 __func__, 4804 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4805 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4806 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4807 le16toh(sc->rxon.rxchain), 4808 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4809 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4810 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4811 if (error != 0) { 4812 device_printf(sc->sc_dev, 4813 "%s: RXON command failed, error %d\n", __func__, error); 4814 return error; 4815 } 4816 4817 /* Configuration has changed, set TX power accordingly. */ 4818 error = hal->set_txpower(sc, ni->ni_chan, 1); 4819 if (error != 0) { 4820 device_printf(sc->sc_dev, 4821 "%s: could not set Tx power, error %d\n", __func__, error); 4822 return error; 4823 } 4824 /* 4825 * Reconfiguring RXON clears the firmware nodes table so we must 4826 * add the broadcast node again. 4827 */ 4828 error = iwn_add_broadcast_node(sc, 1); 4829 if (error != 0) { 4830 device_printf(sc->sc_dev, 4831 "%s: could not add broadcast node, error %d\n", 4832 __func__, error); 4833 return error; 4834 } 4835 return 0; 4836 } 4837 4838 /* 4839 * Configure the adapter for associated state. 4840 */ 4841 static int 4842 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 4843 { 4844 #define MS(v,x) (((v) & x) >> x##_S) 4845 const struct iwn_hal *hal = sc->sc_hal; 4846 struct ifnet *ifp = sc->sc_ifp; 4847 struct ieee80211com *ic = ifp->if_l2com; 4848 struct ieee80211_node *ni = vap->iv_bss; 4849 struct iwn_node_info node; 4850 int error; 4851 4852 sc->calib.state = IWN_CALIB_STATE_INIT; 4853 4854 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4855 /* Link LED blinks while monitoring. */ 4856 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 4857 return 0; 4858 } 4859 error = iwn_set_timing(sc, ni); 4860 if (error != 0) { 4861 device_printf(sc->sc_dev, 4862 "%s: could not set timing, error %d\n", __func__, error); 4863 return error; 4864 } 4865 4866 /* Update adapter configuration. */ 4867 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4868 sc->rxon.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); 4869 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 4870 /* Short preamble and slot time are negotiated when associating. 
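 * so clear both flags first and re-set them from the current ic_flags.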
*/ 4871 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 4872 sc->rxon.flags |= htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 4873 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 4874 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4875 else 4876 sc->rxon.flags &= ~htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 4877 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4878 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 4879 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4880 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 4881 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 4882 sc->rxon.cck_mask = 0; 4883 sc->rxon.ofdm_mask = 0x15; 4884 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 4885 sc->rxon.cck_mask = 0x03; 4886 sc->rxon.ofdm_mask = 0; 4887 } else { 4888 /* XXX assume 802.11b/g */ 4889 sc->rxon.cck_mask = 0x0f; 4890 sc->rxon.ofdm_mask = 0x15; 4891 } 4892 #if 0 /* HT */ 4893 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 4894 sc->rxon.flags &= ~htole32(IWN_RXON_HT); 4895 if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) 4896 sc->rxon.flags |= htole32(IWN_RXON_HT40U); 4897 else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 4898 sc->rxon.flags |= htole32(IWN_RXON_HT40D); 4899 else 4900 sc->rxon.flags |= htole32(IWN_RXON_HT20); 4901 sc->rxon.rxchain = htole16( 4902 IWN_RXCHAIN_VALID(3) 4903 | IWN_RXCHAIN_MIMO_COUNT(3) 4904 | IWN_RXCHAIN_IDLE_COUNT(1) 4905 | IWN_RXCHAIN_MIMO_FORCE); 4906 4907 maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); 4908 ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); 4909 } else 4910 maxrxampdu = ampdudensity = 0; 4911 #endif 4912 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 4913 4914 DPRINTF(sc, IWN_DEBUG_STATE, 4915 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " 4916 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " 4917 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", 4918 __func__, 4919 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags), 4920 sc->rxon.cck_mask, sc->rxon.ofdm_mask, 4921 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask, 4922 le16toh(sc->rxon.rxchain), 4923 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":", 4924 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter)); 4925 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1); 4926 if (error != 0) { 4927 device_printf(sc->sc_dev, 4928 "%s: could not update configuration, error %d\n", 4929 __func__, error); 4930 return error; 4931 } 4932 4933 /* Configuration has changed, set TX power accordingly. */ 4934 error = hal->set_txpower(sc, ni->ni_chan, 1); 4935 if (error != 0) { 4936 device_printf(sc->sc_dev, 4937 "%s: could not set Tx power, error %d\n", __func__, error); 4938 return error; 4939 } 4940 4941 /* Add BSS node. 
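 * The AP gets an explicit station-table entry (IWN_ID_BSS) so that a link
 * quality / MRR table can be programmed for it just below.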
*/ 4942 memset(&node, 0, sizeof node); 4943 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 4944 node.id = IWN_ID_BSS; 4945 #ifdef notyet 4946 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) | 4947 IWN_AMDPU_DENSITY(5)); /* 2us */ 4948 #endif 4949 DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n", 4950 __func__, node.id, le32toh(node.htflags)); 4951 error = hal->add_node(sc, &node, 1); 4952 if (error != 0) { 4953 device_printf(sc->sc_dev, "could not add BSS node\n"); 4954 return error; 4955 } 4956 DPRINTF(sc, IWN_DEBUG_STATE, "setting link quality for node %d\n", 4957 node.id); 4958 error = iwn_set_link_quality(sc, node.id, 1); 4959 if (error != 0) { 4960 device_printf(sc->sc_dev, 4961 "%s: could not setup MRR for node %d, error %d\n", 4962 __func__, node.id, error); 4963 return error; 4964 } 4965 4966 error = iwn_init_sensitivity(sc); 4967 if (error != 0) { 4968 device_printf(sc->sc_dev, 4969 "%s: could not set sensitivity, error %d\n", 4970 __func__, error); 4971 return error; 4972 } 4973 4974 /* Start periodic calibration timer. */ 4975 sc->calib.state = IWN_CALIB_STATE_ASSOC; 4976 iwn_calib_reset(sc); 4977 4978 /* Link LED always on while associated. */ 4979 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 4980 4981 return 0; 4982 #undef MS 4983 } 4984 4985 #if 0 /* HT */ 4986 /* 4987 * This function is called by the upper layer when an ADDBA request is received 4988 * from another STA and before the ADDBA response is sent. 4989 */ 4990 static int 4991 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 4992 uint8_t tid) 4993 { 4994 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 4995 struct iwn_softc *sc = ic->ic_softc; 4996 struct iwn_node *wn = (void *)ni; 4997 struct iwn_node_info node; 4998 4999 memset(&node, 0, sizeof node); 5000 node.id = wn->id; 5001 node.control = IWN_NODE_UPDATE; 5002 node.flags = IWN_FLAG_SET_ADDBA; 5003 node.addba_tid = tid; 5004 node.addba_ssn = htole16(ba->ba_winstart); 5005 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 5006 wn->id, tid, ba->ba_winstart); 5007 return sc->sc_hal->add_node(sc, &node, 1); 5008 } 5009 5010 /* 5011 * This function is called by the upper layer on teardown of an HT-immediate 5012 * Block Ack agreement (e.g. upon receipt of a DELBA frame.) 5013 */ 5014 static void 5015 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5016 uint8_t tid) 5017 { 5018 struct iwn_softc *sc = ic->ic_softc; 5019 struct iwn_node *wn = (void *)ni; 5020 struct iwn_node_info node; 5021 5022 memset(&node, 0, sizeof node); 5023 node.id = wn->id; 5024 node.control = IWN_NODE_UPDATE; 5025 node.flags = IWN_FLAG_SET_DELBA; 5026 node.delba_tid = tid; 5027 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 5028 (void)sc->sc_hal->add_node(sc, &node, 1); 5029 } 5030 5031 /* 5032 * This function is called by the upper layer when an ADDBA response is received 5033 * from another STA. 5034 */ 5035 static int 5036 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5037 uint8_t tid) 5038 { 5039 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5040 struct iwn_softc *sc = ic->ic_softc; 5041 const struct iwn_hal *hal = sc->sc_hal; 5042 struct iwn_node *wn = (void *)ni; 5043 struct iwn_node_info node; 5044 int error; 5045 5046 /* Enable TX for the specified RA/TID.
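 * by clearing the TID's bit in the node's disable_tid bitmap and pushing
 * the updated mask to the firmware.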
*/ 5047 wn->disable_tid &= ~(1 << tid); 5048 memset(&node, 0, sizeof node); 5049 node.id = wn->id; 5050 node.control = IWN_NODE_UPDATE; 5051 node.flags = IWN_FLAG_SET_DISABLE_TID; 5052 node.disable_tid = htole16(wn->disable_tid); 5053 error = hal->add_node(sc, &node, 1); 5054 if (error != 0) 5055 return error; 5056 5057 if ((error = iwn_nic_lock(sc)) != 0) 5058 return error; 5059 hal->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5060 iwn_nic_unlock(sc); 5061 return 0; 5062 } 5063 5064 static void 5065 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5066 uint8_t tid) 5067 { 5068 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5069 struct iwn_softc *sc = ic->ic_softc; 5070 int error; 5071 5072 error = iwn_nic_lock(sc); 5073 if (error != 0) 5074 return; 5075 sc->sc_hal->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5076 iwn_nic_unlock(sc); 5077 } 5078 5079 static void 5080 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5081 uint8_t tid, uint16_t ssn) 5082 { 5083 struct iwn_node *wn = (void *)ni; 5084 int qid = 7 + tid; 5085 5086 /* Stop TX scheduler while we're changing its configuration. */ 5087 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5088 IWN4965_TXQ_STATUS_CHGACT); 5089 5090 /* Assign RA/TID translation to the queue. */ 5091 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5092 wn->id << 4 | tid); 5093 5094 /* Enable chain-building mode for the queue. */ 5095 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5096 5097 /* Set starting sequence number from the ADDBA request. */ 5098 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5099 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5100 5101 /* Set scheduler window size. */ 5102 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5103 IWN_SCHED_WINSZ); 5104 /* Set scheduler frame limit. */ 5105 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5106 IWN_SCHED_LIMIT << 16); 5107 5108 /* Enable interrupts for the queue. */ 5109 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5110 5111 /* Mark the queue as active. */ 5112 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5113 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5114 iwn_tid2fifo[tid] << 1); 5115 } 5116 5117 static void 5118 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5119 { 5120 int qid = 7 + tid; 5121 5122 /* Stop TX scheduler while we're changing its configuration. */ 5123 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5124 IWN4965_TXQ_STATUS_CHGACT); 5125 5126 /* Set starting sequence number from the ADDBA request. */ 5127 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5128 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5129 5130 /* Disable interrupts for the queue. */ 5131 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5132 5133 /* Mark the queue as inactive. */ 5134 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5135 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5136 } 5137 5138 static void 5139 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5140 uint8_t tid, uint16_t ssn) 5141 { 5142 struct iwn_node *wn = (void *)ni; 5143 int qid = 10 + tid; 5144 5145 /* Stop TX scheduler while we're changing its configuration. */ 5146 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5147 IWN5000_TXQ_STATUS_CHGACT); 5148 5149 /* Assign RA/TID translation to the queue. 
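 * (the translation table entry packs the station id above the TID:
 * wn->id << 4 | tid.)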
*/ 5150 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5151 wn->id << 4 | tid); 5152 5153 /* Enable chain-building mode for the queue. */ 5154 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5155 5156 /* Enable aggregation for the queue. */ 5157 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5158 5159 /* Set starting sequence number from the ADDBA request. */ 5160 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5161 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5162 5163 /* Set scheduler window size and frame limit. */ 5164 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5165 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5166 5167 /* Enable interrupts for the queue. */ 5168 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5169 5170 /* Mark the queue as active. */ 5171 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5172 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5173 } 5174 5175 static void 5176 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 5177 { 5178 int qid = 10 + tid; 5179 5180 /* Stop TX scheduler while we're changing its configuration. */ 5181 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5182 IWN5000_TXQ_STATUS_CHGACT); 5183 5184 /* Disable aggregation for the queue. */ 5185 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5186 5187 /* Set starting sequence number from the ADDBA request. */ 5188 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5189 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5190 5191 /* Disable interrupts for the queue. */ 5192 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5193 5194 /* Mark the queue as inactive. */ 5195 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5196 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5197 } 5198 #endif 5199 5200 /* 5201 * Send calibration results to the runtime firmware. These results were 5202 * obtained on first boot from the initialization firmware, or by reading 5203 * the EEPROM for crystal calibration. 5204 */ 5205 static int 5206 iwn5000_send_calib_results(struct iwn_softc *sc) 5207 { 5208 struct iwn_calib_info *calib_result; 5209 int idx, error; 5210 5211 for (idx = 0; idx < IWN_CALIB_NUM; idx++) { 5212 calib_result = &sc->calib_results[idx]; 5213 5214 /* No support for this type of calibration. */ 5215 if ((sc->calib_init & (1 << idx)) == 0) 5216 continue; 5217 5218 /* No calibration result available. */ 5219 if (calib_result->buf == NULL) 5220 continue; 5221 5222 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5223 "%s: send calibration result idx=%d, len=%d\n", 5224 __func__, idx, calib_result->len); 5225 5226 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, calib_result->buf, 5227 calib_result->len, 0); 5228 if (error != 0) { 5229 device_printf(sc->sc_dev, 5230 "%s: could not send calibration result " 5231 "idx=%d, error=%d\n", 5232 __func__, idx, error); 5233 return error; 5234 } 5235 } 5236 return 0; 5237 } 5238 5239 /* 5240 * Save calibration result at the given index. The index determines 5241 * in which order the results are sent to the runtime firmware. 
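 * Any result previously saved at the same index is freed and replaced by
 * a private copy of the new payload.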
5242 */ 5243 static int 5244 iwn5000_save_calib_result(struct iwn_softc *sc, struct iwn_phy_calib *calib, 5245 int len, int idx) 5246 { 5247 struct iwn_calib_info *calib_result = &sc->calib_results[idx]; 5248 5249 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5250 "%s: saving calibration result code=%d, idx=%d, len=%d\n", 5251 __func__, calib->code, idx, len); 5252 5253 if (calib_result->buf != NULL) 5254 free(calib_result->buf, M_DEVBUF); 5255 5256 calib_result->buf = malloc(len, M_DEVBUF, M_NOWAIT); 5257 if (calib_result->buf == NULL) { 5258 device_printf(sc->sc_dev, 5259 "%s: not enough memory for calibration result " 5260 "code=%d, len=%d\n", __func__, calib->code, len); 5261 return ENOMEM; 5262 } 5263 5264 calib_result->len = len; 5265 memcpy(calib_result->buf, calib, len); 5266 return 0; 5267 } 5268 5269 static void 5270 iwn5000_free_calib_results(struct iwn_softc *sc) 5271 { 5272 struct iwn_calib_info *calib_result; 5273 int idx; 5274 5275 for (idx = 0; idx < IWN_CALIB_NUM; idx++) { 5276 calib_result = &sc->calib_results[idx]; 5277 5278 if (calib_result->buf != NULL) 5279 free(calib_result->buf, M_DEVBUF); 5280 5281 calib_result->buf = NULL; 5282 calib_result->len = 0; 5283 } 5284 } 5285 5286 /* 5287 * Obtain the crystal calibration result from the EEPROM. 5288 */ 5289 static int 5290 iwn5000_chrystal_calib(struct iwn_softc *sc) 5291 { 5292 struct iwn5000_phy_calib_crystal cmd; 5293 uint32_t base, crystal; 5294 uint16_t val; 5295 5296 /* Read crystal calibration. */ 5297 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 5298 base = le16toh(val); 5299 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &crystal, 5300 sizeof(uint32_t)); 5301 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: crystal calibration=0x%08x\n", 5302 __func__, le32toh(crystal)); 5303 5304 memset(&cmd, 0, sizeof cmd); 5305 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5306 cmd.ngroups = 1; 5307 cmd.isvalid = 1; 5308 cmd.cap_pin[0] = le32toh(crystal) & 0xff; 5309 cmd.cap_pin[1] = (le32toh(crystal) >> 16) & 0xff; 5310 5311 return iwn5000_save_calib_result(sc, (struct iwn_phy_calib *)&cmd, 5312 sizeof cmd, IWN_CALIB_IDX_XTAL); 5313 } 5314 5315 /* 5316 * Query calibration results from the initialization firmware. We do this 5317 * only once at first boot. 5318 */ 5319 static int 5320 iwn5000_send_calib_query(struct iwn_softc *sc) 5321 { 5322 #define CALIB_INIT_CFG 0xffffffff; 5323 struct iwn5000_calib_config cmd; 5324 int error; 5325 5326 memset(&cmd, 0, sizeof cmd); 5327 cmd.ucode.once.enable = CALIB_INIT_CFG; 5328 cmd.ucode.once.start = CALIB_INIT_CFG; 5329 cmd.ucode.once.send = CALIB_INIT_CFG; 5330 cmd.ucode.flags = CALIB_INIT_CFG; 5331 5332 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: query calibration results\n", 5333 __func__); 5334 5335 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5336 if (error != 0) 5337 return error; 5338 5339 /* Wait at most two seconds for calibration to complete. */ 5340 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5341 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz); 5342 5343 return error; 5344 #undef CALIB_INIT_CFG 5345 } 5346 5347 /* 5348 * Process a CALIBRATION_RESULT notification sent by the initialization 5349 * firmware on response to a CMD_CALIB_CONFIG command. 
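 * The payload is saved with iwn5000_save_calib_result() in the slot
 * matching its calibration code so it can be replayed to the runtime
 * firmware later; the 4-byte length field is subtracted from the reported
 * frame size first.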
5350 */ 5351 static int 5352 iwn5000_rx_calib_result(struct iwn_softc *sc, struct iwn_rx_desc *desc, 5353 struct iwn_rx_data *data) 5354 { 5355 #define FRAME_SIZE_MASK 0x3fff 5356 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 5357 int len, idx; 5358 5359 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 5360 len = (le32toh(desc->len) & FRAME_SIZE_MASK); 5361 5362 /* Remove length field itself. */ 5363 len -= 4; 5364 5365 /* 5366 * Determine the order in which the results will be send to the 5367 * runtime firmware. 5368 */ 5369 switch (calib->code) { 5370 case IWN5000_PHY_CALIB_DC: 5371 idx = IWN_CALIB_IDX_DC; 5372 break; 5373 case IWN5000_PHY_CALIB_LO: 5374 idx = IWN_CALIB_IDX_LO; 5375 break; 5376 case IWN5000_PHY_CALIB_TX_IQ: 5377 idx = IWN_CALIB_IDX_TX_IQ; 5378 break; 5379 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 5380 idx = IWN_CALIB_IDX_TX_IQ_PERIODIC; 5381 break; 5382 case IWN5000_PHY_CALIB_BASE_BAND: 5383 idx = IWN_CALIB_IDX_BASE_BAND; 5384 break; 5385 default: 5386 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5387 "%s: unknown calibration code=%d\n", __func__, calib->code); 5388 return EINVAL; 5389 } 5390 return iwn5000_save_calib_result(sc, calib, len, idx); 5391 #undef FRAME_SIZE_MASK 5392 } 5393 5394 static int 5395 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5396 { 5397 struct iwn5000_wimax_coex wimax; 5398 5399 #ifdef notyet 5400 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5401 /* Enable WiMAX coexistence for combo adapters. */ 5402 wimax.flags = 5403 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5404 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5405 IWN_WIMAX_COEX_STA_TABLE_VALID | 5406 IWN_WIMAX_COEX_ENABLE; 5407 memcpy(wimax.events, iwn6050_wimax_events, 5408 sizeof iwn6050_wimax_events); 5409 } else 5410 #endif 5411 { 5412 /* Disable WiMAX coexistence. */ 5413 wimax.flags = 0; 5414 memset(wimax.events, 0, sizeof wimax.events); 5415 } 5416 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 5417 __func__); 5418 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5419 } 5420 5421 /* 5422 * This function is called after the runtime firmware notifies us of its 5423 * readiness (called in a process context.) 5424 */ 5425 static int 5426 iwn4965_post_alive(struct iwn_softc *sc) 5427 { 5428 int error, qid; 5429 5430 if ((error = iwn_nic_lock(sc)) != 0) 5431 return error; 5432 5433 /* Clear TX scheduler state in SRAM. */ 5434 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5435 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5436 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5437 5438 /* Set physical address of TX scheduler rings (1KB aligned.) */ 5439 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5440 5441 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5442 5443 /* Disable chain mode for all our 16 queues. */ 5444 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5445 5446 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5447 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5448 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5449 5450 /* Set scheduler window size. */ 5451 iwn_mem_write(sc, sc->sched_base + 5452 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5453 /* Set scheduler frame limit. */ 5454 iwn_mem_write(sc, sc->sched_base + 5455 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5456 IWN_SCHED_LIMIT << 16); 5457 } 5458 5459 /* Enable interrupts for all our 16 queues. */ 5460 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5461 /* Identify TX FIFO rings (0-7). 
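 * (writing 0xff to SCHED_TXFACT presumably activates all eight FIFOs.)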
*/ 5462 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5463 5464 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5465 for (qid = 0; qid < 7; qid++) { 5466 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5467 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5468 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5469 } 5470 iwn_nic_unlock(sc); 5471 return 0; 5472 } 5473 5474 /* 5475 * This function is called after the initialization or runtime firmware 5476 * notifies us of its readiness (called in a process context.) 5477 */ 5478 static int 5479 iwn5000_post_alive(struct iwn_softc *sc) 5480 { 5481 int error, qid; 5482 5483 /* Switch to using ICT interrupt mode. */ 5484 iwn5000_ict_reset(sc); 5485 5486 error = iwn_nic_lock(sc); 5487 if (error != 0) 5488 return error; 5489 5490 /* Clear TX scheduler state in SRAM. */ 5491 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5492 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5493 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5494 5495 /* Set physical address of TX scheduler rings (1KB aligned.) */ 5496 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5497 5498 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5499 5500 /* Enable chain mode for all queues, except command queue. */ 5501 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5502 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5503 5504 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5505 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5506 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5507 5508 iwn_mem_write(sc, sc->sched_base + 5509 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5510 /* Set scheduler window size and frame limit. */ 5511 iwn_mem_write(sc, sc->sched_base + 5512 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5513 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5514 } 5515 5516 /* Enable interrupts for all our 20 queues. */ 5517 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5518 /* Identify TX FIFO rings (0-7). */ 5519 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5520 5521 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5522 for (qid = 0; qid < 7; qid++) { 5523 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5524 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5525 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 5526 } 5527 iwn_nic_unlock(sc); 5528 5529 /* Configure WiMAX coexistence for combo adapters. */ 5530 error = iwn5000_send_wimax_coex(sc); 5531 if (error != 0) { 5532 device_printf(sc->sc_dev, 5533 "%s: could not configure WiMAX coexistence, error %d\n", 5534 __func__, error); 5535 return error; 5536 } 5537 5538 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 5539 /* 5540 * Start calibration by setting and sending the crystal 5541 * calibration first; this must be done before we are able 5542 * to query the other calibration results. 5543 */ 5544 error = iwn5000_chrystal_calib(sc); 5545 if (error != 0) { 5546 device_printf(sc->sc_dev, 5547 "%s: could not set crystal calibration, " 5548 "error=%d\n", __func__, error); 5549 return error; 5550 } 5551 error = iwn5000_send_calib_results(sc); 5552 if (error != 0) { 5553 device_printf(sc->sc_dev, 5554 "%s: could not send crystal calibration, " 5555 "error=%d\n", __func__, error); 5556 return error; 5557 } 5558 5559 /* 5560 * Query other calibration results from the initialization 5561 * firmware.
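 * iwn5000_send_calib_query() below issues CMD_CALIB_CONFIG and then sleeps
 * on the softc for up to two seconds; the sleep ends early once the
 * notification path marks IWN_FLAG_CALIB_DONE.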
5562 */ 5563 error = iwn5000_send_calib_query(sc); 5564 if (error != 0) { 5565 device_printf(sc->sc_dev, 5566 "%s: could not query calibration, error=%d\n", 5567 __func__, error); 5568 return error; 5569 } 5570 5571 /* 5572 * We have the calibration results now, reboot with the 5573 * runtime firmware (call ourselves recursively!) 5574 */ 5575 iwn_hw_stop(sc); 5576 error = iwn_hw_init(sc); 5577 } else { 5578 /* 5579 * Send calibration results obtained from the initialization 5580 * firmware to the runtime firmware. 5581 */ 5582 error = iwn5000_send_calib_results(sc); 5583 } 5584 return error; 5585 } 5586 5587 /* 5588 * The firmware boot code is small and is intended to be copied directly into 5589 * the NIC internal memory (no DMA transfer.) 5590 */ 5591 static int 5592 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 5593 { 5594 int error, ntries; 5595 5596 size /= sizeof (uint32_t); 5597 5598 error = iwn_nic_lock(sc); 5599 if (error != 0) 5600 return error; 5601 5602 /* Copy microcode image into NIC memory. */ 5603 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 5604 (const uint32_t *)ucode, size); 5605 5606 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 5607 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 5608 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 5609 5610 /* Start boot load now. */ 5611 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 5612 5613 /* Wait for transfer to complete. */ 5614 for (ntries = 0; ntries < 1000; ntries++) { 5615 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 5616 IWN_BSM_WR_CTRL_START)) 5617 break; 5618 DELAY(10); 5619 } 5620 if (ntries == 1000) { 5621 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 5622 __func__); 5623 iwn_nic_unlock(sc); 5624 return ETIMEDOUT; 5625 } 5626 5627 /* Enable boot after power up. */ 5628 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 5629 5630 iwn_nic_unlock(sc); 5631 return 0; 5632 } 5633 5634 static int 5635 iwn4965_load_firmware(struct iwn_softc *sc) 5636 { 5637 struct iwn_fw_info *fw = &sc->fw; 5638 struct iwn_dma_info *dma = &sc->fw_dma; 5639 int error; 5640 5641 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 5642 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 5643 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5644 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5645 fw->init.text, fw->init.textsz); 5646 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5647 5648 /* Tell adapter where to find initialization sections. */ 5649 error = iwn_nic_lock(sc); 5650 if (error != 0) 5651 return error; 5652 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5653 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 5654 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5655 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5656 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 5657 iwn_nic_unlock(sc); 5658 5659 /* Load firmware boot code. */ 5660 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 5661 if (error != 0) { 5662 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 5663 __func__); 5664 return error; 5665 } 5666 /* Now press "execute". */ 5667 IWN_WRITE(sc, IWN_RESET, 0); 5668 5669 /* Wait at most one second for first alive notification. 
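 * The notification path is expected to wakeup(9) the softc when the alive
 * (UC_READY) notification arrives; msleep() then returns 0, EWOULDBLOCK if
 * the one-second timeout expires first, or an EINTR/ERESTART style error
 * if a signal arrives (PCATCH is set).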
*/ 5670 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 5671 if (error) { 5672 device_printf(sc->sc_dev, 5673 "%s: timeout waiting for adapter to initialize, error %d\n", 5674 __func__, error); 5675 return error; 5676 } 5677 5678 /* Retrieve current temperature for initial TX power calibration. */ 5679 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 5680 sc->temp = iwn4965_get_temperature(sc); 5681 5682 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 5683 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 5684 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5685 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 5686 fw->main.text, fw->main.textsz); 5687 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5688 5689 /* Tell adapter where to find runtime sections. */ 5690 error = iwn_nic_lock(sc); 5691 if (error != 0) 5692 return error; 5693 5694 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 5695 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 5696 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 5697 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 5698 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 5699 IWN_FW_UPDATED | fw->main.textsz); 5700 iwn_nic_unlock(sc); 5701 5702 return 0; 5703 } 5704 5705 static int 5706 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 5707 const uint8_t *section, int size) 5708 { 5709 struct iwn_dma_info *dma = &sc->fw_dma; 5710 int error; 5711 5712 /* Copy firmware section into pre-allocated DMA-safe memory. */ 5713 memcpy(dma->vaddr, section, size); 5714 bus_dmamap_sync(sc->fw_dma.tag, dma->map, BUS_DMASYNC_PREWRITE); 5715 5716 error = iwn_nic_lock(sc); 5717 if (error != 0) 5718 return error; 5719 5720 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5721 IWN_FH_TX_CONFIG_DMA_PAUSE); 5722 5723 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 5724 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 5725 IWN_LOADDR(dma->paddr)); 5726 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 5727 IWN_HIADDR(dma->paddr) << 28 | size); 5728 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 5729 IWN_FH_TXBUF_STATUS_TBNUM(1) | 5730 IWN_FH_TXBUF_STATUS_TBIDX(1) | 5731 IWN_FH_TXBUF_STATUS_TFBD_VALID); 5732 5733 /* Kick Flow Handler to start DMA transfer. */ 5734 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 5735 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 5736 5737 iwn_nic_unlock(sc); 5738 5739 /* Wait at most five seconds for FH DMA transfer to complete. */ 5740 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); 5741 } 5742 5743 static int 5744 iwn5000_load_firmware(struct iwn_softc *sc) 5745 { 5746 struct iwn_fw_part *fw; 5747 int error; 5748 5749 /* Load the initialization firmware on first boot only. */ 5750 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 5751 &sc->fw.main : &sc->fw.init; 5752 5753 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 5754 fw->text, fw->textsz); 5755 if (error != 0) { 5756 device_printf(sc->sc_dev, 5757 "%s: could not load firmware %s section, error %d\n", 5758 __func__, ".text", error); 5759 return error; 5760 } 5761 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 5762 fw->data, fw->datasz); 5763 if (error != 0) { 5764 device_printf(sc->sc_dev, 5765 "%s: could not load firmware %s section, error %d\n", 5766 __func__, ".data", error); 5767 return error; 5768 } 5769 5770 /* Now press "execute".
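 * Clearing IWN_RESET below releases the on-board CPU so it starts running
 * the image just transferred; the caller (iwn_hw_init) then waits for the
 * firmware alive notification before continuing with post_alive setup.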
*/ 5771 IWN_WRITE(sc, IWN_RESET, 0); 5772 return 0; 5773 } 5774 5775 /* 5776 * Extract text and data sections from a legacy firmware image. 5777 */ 5778 static int 5779 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 5780 { 5781 const uint32_t *ptr; 5782 size_t hdrlen = 24; 5783 uint32_t rev; 5784 5785 ptr = (const uint32_t *)sc->fw_fp->data; 5786 rev = le32toh(*ptr++); 5787 5788 /* Check firmware API version. */ 5789 if (IWN_FW_API(rev) <= 1) { 5790 device_printf(sc->sc_dev, 5791 "%s: bad firmware, need API version >=2\n", __func__); 5792 return EINVAL; 5793 } 5794 if (IWN_FW_API(rev) >= 3) { 5795 /* Skip build number (version 2 header). */ 5796 hdrlen += 4; 5797 ptr++; 5798 } 5799 if (fw->size < hdrlen) { 5800 device_printf(sc->sc_dev, 5801 "%s: firmware file too short: %zu bytes\n", 5802 __func__, fw->size); 5803 return EINVAL; 5804 } 5805 fw->main.textsz = le32toh(*ptr++); 5806 fw->main.datasz = le32toh(*ptr++); 5807 fw->init.textsz = le32toh(*ptr++); 5808 fw->init.datasz = le32toh(*ptr++); 5809 fw->boot.textsz = le32toh(*ptr++); 5810 5811 /* Check that all firmware sections fit. */ 5812 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 5813 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 5814 device_printf(sc->sc_dev, 5815 "%s: firmware file too short: %zu bytes\n", 5816 __func__, fw->size); 5817 return EINVAL; 5818 } 5819 5820 /* Get pointers to firmware sections. */ 5821 fw->main.text = (const uint8_t *)ptr; 5822 fw->main.data = fw->main.text + fw->main.textsz; 5823 fw->init.text = fw->main.data + fw->main.datasz; 5824 fw->init.data = fw->init.text + fw->init.textsz; 5825 fw->boot.text = fw->init.data + fw->init.datasz; 5826 5827 return 0; 5828 } 5829 5830 /* 5831 * Extract text and data sections from a TLV firmware image. 5832 */ 5833 int 5834 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 5835 uint16_t alt) 5836 { 5837 const struct iwn_fw_tlv_hdr *hdr; 5838 const struct iwn_fw_tlv *tlv; 5839 const uint8_t *ptr, *end; 5840 uint64_t altmask; 5841 uint32_t len; 5842 5843 if (fw->size < sizeof (*hdr)) { 5844 device_printf(sc->sc_dev, 5845 "%s: firmware file too short: %zu bytes\n", 5846 __func__, fw->size); 5847 return EINVAL; 5848 } 5849 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 5850 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 5851 device_printf(sc->sc_dev, 5852 "%s: bad firmware file signature 0x%08x\n", 5853 __func__, le32toh(hdr->signature)); 5854 return EINVAL; 5855 } 5856 5857 /* 5858 * Select the closest supported alternative that is less than 5859 * or equal to the specified one. 5860 */ 5861 altmask = le64toh(hdr->altmask); 5862 while (alt > 0 && !(altmask & (1ULL << alt))) 5863 alt--; /* Downgrade. */ 5864 5865 ptr = (const uint8_t *)(hdr + 1); 5866 end = (const uint8_t *)(fw->data + fw->size); 5867 5868 /* Parse type-length-value fields. */ 5869 while (ptr + sizeof (*tlv) <= end) { 5870 tlv = (const struct iwn_fw_tlv *)ptr; 5871 len = le32toh(tlv->len); 5872 5873 ptr += sizeof (*tlv); 5874 if (ptr + len > end) { 5875 device_printf(sc->sc_dev, 5876 "%s: firmware file too short: %zu bytes\n", 5877 __func__, fw->size); 5878 return EINVAL; 5879 } 5880 /* Skip other alternatives. 
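 * A record with alt == 0 applies to every alternative; a non-zero alt is
 * honoured only when it matches the alternative selected above.  Each
 * record consists of a { type, alt, len } header followed by len payload
 * bytes, padded to the next 32-bit boundary.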
*/ 5881 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 5882 goto next; 5883 5884 switch (le16toh(tlv->type)) { 5885 case IWN_FW_TLV_MAIN_TEXT: 5886 fw->main.text = ptr; 5887 fw->main.textsz = len; 5888 break; 5889 case IWN_FW_TLV_MAIN_DATA: 5890 fw->main.data = ptr; 5891 fw->main.datasz = len; 5892 break; 5893 case IWN_FW_TLV_INIT_TEXT: 5894 fw->init.text = ptr; 5895 fw->init.textsz = len; 5896 break; 5897 case IWN_FW_TLV_INIT_DATA: 5898 fw->init.data = ptr; 5899 fw->init.datasz = len; 5900 break; 5901 case IWN_FW_TLV_BOOT_TEXT: 5902 fw->boot.text = ptr; 5903 fw->boot.textsz = len; 5904 break; 5905 default: 5906 DPRINTF(sc, IWN_DEBUG_RESET, 5907 "%s: TLV type %d not handled\n", 5908 __func__, le16toh(tlv->type)); 5909 break; 5910 } 5911 next: /* TLV fields are 32-bit aligned. */ 5912 ptr += (len + 3) & ~3; 5913 } 5914 return 0; 5915 } 5916 5917 static int 5918 iwn_read_firmware(struct iwn_softc *sc) 5919 { 5920 const struct iwn_hal *hal = sc->sc_hal; 5921 struct iwn_fw_info *fw = &sc->fw; 5922 int error; 5923 5924 IWN_UNLOCK(sc); 5925 5926 memset(fw, 0, sizeof (*fw)); 5927 5928 /* Read firmware image from filesystem. */ 5929 sc->fw_fp = firmware_get(sc->fwname); 5930 if (sc->fw_fp == NULL) { 5931 device_printf(sc->sc_dev, 5932 "%s: could not load firmware image \"%s\"\n", __func__, 5933 sc->fwname); 5934 IWN_LOCK(sc); 5935 return EINVAL; 5936 } 5937 IWN_LOCK(sc); 5938 5939 fw->size = sc->fw_fp->datasize; 5940 fw->data = (const uint8_t *)sc->fw_fp->data; 5941 if (fw->size < sizeof (uint32_t)) { 5942 device_printf(sc->sc_dev, 5943 "%s: firmware file too short: %zu bytes\n", 5944 __func__, fw->size); 5945 return EINVAL; 5946 } 5947 5948 /* Retrieve text and data sections. */ 5949 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 5950 error = iwn_read_firmware_leg(sc, fw); 5951 else 5952 error = iwn_read_firmware_tlv(sc, fw, 1); 5953 if (error != 0) { 5954 device_printf(sc->sc_dev, 5955 "%s: could not read firmware sections\n", __func__); 5956 return error; 5957 } 5958 5959 /* Make sure text and data sections fit in hardware memory. */ 5960 if (fw->main.textsz > hal->fw_text_maxsz || 5961 fw->main.datasz > hal->fw_data_maxsz || 5962 fw->init.textsz > hal->fw_text_maxsz || 5963 fw->init.datasz > hal->fw_data_maxsz || 5964 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 5965 (fw->boot.textsz & 3) != 0) { 5966 device_printf(sc->sc_dev, 5967 "%s: firmware sections too large\n", __func__); 5968 return EINVAL; 5969 } 5970 5971 /* We can proceed with loading the firmware. */ 5972 return 0; 5973 } 5974 5975 static int 5976 iwn_clock_wait(struct iwn_softc *sc) 5977 { 5978 int ntries; 5979 5980 /* Set "initialization complete" bit. */ 5981 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 5982 5983 /* Wait for clock stabilization. */ 5984 for (ntries = 0; ntries < 2500; ntries++) { 5985 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 5986 return 0; 5987 DELAY(10); 5988 } 5989 device_printf(sc->sc_dev, 5990 "%s: timeout waiting for clock stabilization\n", __func__); 5991 return ETIMEDOUT; 5992 } 5993 5994 static int 5995 iwn_apm_init(struct iwn_softc *sc) 5996 { 5997 uint32_t tmp; 5998 int error; 5999 6000 /* Disable L0s exit timer (NMI bug workaround.) */ 6001 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6002 /* Don't wait for ICH L0s (ICH bug workaround.) */ 6003 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6004 6005 /* Set FH wait threshold to max (HW bug under stress workaround.)
*/ 6006 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6007 6008 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6009 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6010 6011 /* Retrieve PCIe Active State Power Management (ASPM). */ 6012 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 6013 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6014 if (tmp & 0x02) /* L1 Entry enabled. */ 6015 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6016 else 6017 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6018 6019 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6020 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6021 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6022 6023 /* Wait for clock stabilization before accessing prph. */ 6024 error = iwn_clock_wait(sc); 6025 if (error != 0) 6026 return error; 6027 6028 error = iwn_nic_lock(sc); 6029 if (error != 0) 6030 return error; 6031 6032 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6033 /* Enable DMA and BSM (Bootstrap State Machine.) */ 6034 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6035 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6036 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6037 } else { 6038 /* Enable DMA. */ 6039 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6040 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6041 } 6042 DELAY(20); 6043 6044 /* Disable L1-Active. */ 6045 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6046 iwn_nic_unlock(sc); 6047 6048 return 0; 6049 } 6050 6051 static void 6052 iwn_apm_stop_master(struct iwn_softc *sc) 6053 { 6054 int ntries; 6055 6056 /* Stop busmaster DMA activity. */ 6057 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6058 for (ntries = 0; ntries < 100; ntries++) { 6059 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6060 return; 6061 DELAY(10); 6062 } 6063 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", 6064 __func__); 6065 } 6066 6067 static void 6068 iwn_apm_stop(struct iwn_softc *sc) 6069 { 6070 iwn_apm_stop_master(sc); 6071 6072 /* Reset the entire device. */ 6073 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6074 DELAY(10); 6075 /* Clear "initialization complete" bit. */ 6076 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6077 } 6078 6079 static int 6080 iwn4965_nic_config(struct iwn_softc *sc) 6081 { 6082 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6083 /* 6084 * I don't believe this to be correct but this is what the 6085 * vendor driver is doing. Probably the bits should not be 6086 * shifted in IWN_RFCFG_*. 6087 */ 6088 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6089 IWN_RFCFG_TYPE(sc->rfcfg) | 6090 IWN_RFCFG_STEP(sc->rfcfg) | 6091 IWN_RFCFG_DASH(sc->rfcfg)); 6092 } 6093 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6094 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6095 return 0; 6096 } 6097 6098 static int 6099 iwn5000_nic_config(struct iwn_softc *sc) 6100 { 6101 uint32_t tmp; 6102 int error; 6103 6104 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6105 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6106 IWN_RFCFG_TYPE(sc->rfcfg) | 6107 IWN_RFCFG_STEP(sc->rfcfg) | 6108 IWN_RFCFG_DASH(sc->rfcfg)); 6109 } 6110 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6111 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6112 6113 error = iwn_nic_lock(sc); 6114 if (error != 0) 6115 return error; 6116 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6117 6118 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6119 /* 6120 * Select first Switching Voltage Regulator (1.32V) to 6121 * solve a stability issue related to noisy DC2DC line 6122 * in the silicon of 1000 Series. 
6123 */ 6124 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6125 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6126 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6127 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6128 } 6129 iwn_nic_unlock(sc); 6130 6131 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6132 /* Use internal power amplifier only. */ 6133 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6134 } 6135 if (sc->hw_type == IWN_HW_REV_TYPE_6050 && sc->calib_ver >= 6) { 6136 /* Indicate that ROM calibration version is >=6. */ 6137 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6138 } 6139 return 0; 6140 } 6141 6142 /* 6143 * Take NIC ownership over Intel Active Management Technology (AMT). 6144 */ 6145 static int 6146 iwn_hw_prepare(struct iwn_softc *sc) 6147 { 6148 int ntries; 6149 6150 /* Check if hardware is ready. */ 6151 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6152 for (ntries = 0; ntries < 5; ntries++) { 6153 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6154 IWN_HW_IF_CONFIG_NIC_READY) 6155 return 0; 6156 DELAY(10); 6157 } 6158 6159 /* Hardware not ready, force into ready state. */ 6160 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6161 for (ntries = 0; ntries < 15000; ntries++) { 6162 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6163 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6164 break; 6165 DELAY(10); 6166 } 6167 if (ntries == 15000) 6168 return ETIMEDOUT; 6169 6170 /* Hardware should be ready now. */ 6171 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6172 for (ntries = 0; ntries < 5; ntries++) { 6173 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6174 IWN_HW_IF_CONFIG_NIC_READY) 6175 return 0; 6176 DELAY(10); 6177 } 6178 return ETIMEDOUT; 6179 } 6180 6181 static int 6182 iwn_hw_init(struct iwn_softc *sc) 6183 { 6184 const struct iwn_hal *hal = sc->sc_hal; 6185 int error, chnl, qid; 6186 6187 /* Clear pending interrupts. */ 6188 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6189 6190 error = iwn_apm_init(sc); 6191 if (error != 0) { 6192 device_printf(sc->sc_dev, 6193 "%s: could not power ON adapter, error %d\n", 6194 __func__, error); 6195 return error; 6196 } 6197 6198 /* Select VMAIN power source. */ 6199 error = iwn_nic_lock(sc); 6200 if (error != 0) 6201 return error; 6202 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 6203 iwn_nic_unlock(sc); 6204 6205 /* Perform adapter-specific initialization. */ 6206 error = hal->nic_config(sc); 6207 if (error != 0) 6208 return error; 6209 6210 /* Initialize RX ring. */ 6211 error = iwn_nic_lock(sc); 6212 if (error != 0) 6213 return error; 6214 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 6215 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 6216 /* Set physical address of RX ring (256-byte aligned.) */ 6217 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 6218 /* Set physical address of RX status (16-byte aligned.) */ 6219 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 6220 /* Enable RX. */ 6221 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 6222 IWN_FH_RX_CONFIG_ENA | 6223 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 6224 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 6225 IWN_FH_RX_CONFIG_SINGLE_FRAME | 6226 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 6227 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 6228 iwn_nic_unlock(sc); 6229 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 6230 6231 error = iwn_nic_lock(sc); 6232 if (error != 0) 6233 return error; 6234 6235 /* Initialize TX scheduler. 
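 * The scheduler is first halted by clearing the TXFACT register, then the
 * "keep warm" page and the per-queue ring base addresses are programmed,
 * and only after that are the Flow Handler DMA channels enabled below.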
*/ 6236 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 6237 6238 /* Set physical address of "keep warm" page (16-byte aligned.) */ 6239 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 6240 6241 /* Initialize TX rings. */ 6242 for (qid = 0; qid < hal->ntxqs; qid++) { 6243 struct iwn_tx_ring *txq = &sc->txq[qid]; 6244 6245 /* Set physical address of TX ring (256-byte aligned.) */ 6246 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 6247 txq->desc_dma.paddr >> 8); 6248 } 6249 iwn_nic_unlock(sc); 6250 6251 /* Enable DMA channels. */ 6252 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 6253 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 6254 IWN_FH_TX_CONFIG_DMA_ENA | 6255 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 6256 } 6257 6258 /* Clear "radio off" and "commands blocked" bits. */ 6259 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6260 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 6261 6262 /* Clear pending interrupts. */ 6263 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6264 /* Enable interrupt coalescing. */ 6265 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 6266 /* Enable interrupts. */ 6267 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6268 6269 /* _Really_ make sure "radio off" bit is cleared! */ 6270 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6271 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 6272 6273 error = hal->load_firmware(sc); 6274 if (error != 0) { 6275 device_printf(sc->sc_dev, 6276 "%s: could not load firmware, error %d\n", 6277 __func__, error); 6278 return error; 6279 } 6280 /* Wait at most one second for firmware alive notification. */ 6281 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); 6282 if (error != 0) { 6283 device_printf(sc->sc_dev, 6284 "%s: timeout waiting for adapter to initialize, error %d\n", 6285 __func__, error); 6286 return error; 6287 } 6288 /* Do post-firmware initialization. */ 6289 return hal->post_alive(sc); 6290 } 6291 6292 static void 6293 iwn_hw_stop(struct iwn_softc *sc) 6294 { 6295 const struct iwn_hal *hal = sc->sc_hal; 6296 uint32_t tmp; 6297 int chnl, qid, ntries; 6298 6299 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 6300 6301 /* Disable interrupts. */ 6302 IWN_WRITE(sc, IWN_INT_MASK, 0); 6303 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6304 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 6305 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6306 6307 /* Make sure we no longer hold the NIC lock. */ 6308 iwn_nic_unlock(sc); 6309 6310 /* Stop TX scheduler. */ 6311 iwn_prph_write(sc, hal->sched_txfact_addr, 0); 6312 6313 /* Stop all DMA channels. */ 6314 if (iwn_nic_lock(sc) == 0) { 6315 for (chnl = 0; chnl < hal->ndmachnls; chnl++) { 6316 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 6317 for (ntries = 0; ntries < 200; ntries++) { 6318 tmp = IWN_READ(sc, IWN_FH_TX_STATUS); 6319 if ((tmp & IWN_FH_TX_STATUS_IDLE(chnl)) == 6320 IWN_FH_TX_STATUS_IDLE(chnl)) 6321 break; 6322 DELAY(10); 6323 } 6324 } 6325 iwn_nic_unlock(sc); 6326 } 6327 6328 /* Stop RX ring. */ 6329 iwn_reset_rx_ring(sc, &sc->rxq); 6330 6331 /* Reset all TX rings. */ 6332 for (qid = 0; qid < hal->ntxqs; qid++) 6333 iwn_reset_tx_ring(sc, &sc->txq[qid]); 6334 6335 if (iwn_nic_lock(sc) == 0) { 6336 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 6337 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6338 iwn_nic_unlock(sc); 6339 } 6340 DELAY(5); 6341 6342 /* Power OFF adapter. 
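 * iwn_apm_stop() stops bus-master DMA, asserts a software reset and clears
 * the "initialization complete" bit, leaving the adapter powered down
 * until the next iwn_hw_init().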
*/ 6343 iwn_apm_stop(sc); 6344 } 6345 6346 static void 6347 iwn_init_locked(struct iwn_softc *sc) 6348 { 6349 struct ifnet *ifp = sc->sc_ifp; 6350 int error; 6351 6352 IWN_LOCK_ASSERT(sc); 6353 6354 error = iwn_hw_prepare(sc); 6355 if (error != 0) { 6356 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 6357 __func__, error); 6358 goto fail; 6359 } 6360 6361 /* Initialize interrupt mask to default value. */ 6362 sc->int_mask = IWN_INT_MASK_DEF; 6363 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 6364 6365 /* Check that the radio is not disabled by hardware switch. */ 6366 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 6367 device_printf(sc->sc_dev, 6368 "radio is disabled by hardware switch\n"); 6369 6370 /* Enable interrupts to get RF toggle notifications. */ 6371 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6372 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6373 return; 6374 } 6375 6376 /* Read firmware images from the filesystem. */ 6377 error = iwn_read_firmware(sc); 6378 if (error != 0) { 6379 device_printf(sc->sc_dev, 6380 "%s: could not read firmware, error %d\n", 6381 __func__, error); 6382 goto fail; 6383 } 6384 6385 /* Initialize hardware and upload firmware. */ 6386 error = iwn_hw_init(sc); 6387 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 6388 sc->fw_fp = NULL; 6389 if (error != 0) { 6390 device_printf(sc->sc_dev, 6391 "%s: could not initialize hardware, error %d\n", 6392 __func__, error); 6393 goto fail; 6394 } 6395 6396 /* Configure adapter now that it is ready. */ 6397 error = iwn_config(sc); 6398 if (error != 0) { 6399 device_printf(sc->sc_dev, 6400 "%s: could not configure device, error %d\n", 6401 __func__, error); 6402 goto fail; 6403 } 6404 6405 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 6406 ifp->if_drv_flags |= IFF_DRV_RUNNING; 6407 6408 return; 6409 6410 fail: 6411 iwn_stop_locked(sc); 6412 } 6413 6414 static void 6415 iwn_init(void *arg) 6416 { 6417 struct iwn_softc *sc = arg; 6418 struct ifnet *ifp = sc->sc_ifp; 6419 struct ieee80211com *ic = ifp->if_l2com; 6420 6421 IWN_LOCK(sc); 6422 iwn_init_locked(sc); 6423 IWN_UNLOCK(sc); 6424 6425 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 6426 ieee80211_start_all(ic); 6427 } 6428 6429 static void 6430 iwn_stop_locked(struct iwn_softc *sc) 6431 { 6432 struct ifnet *ifp = sc->sc_ifp; 6433 6434 IWN_LOCK_ASSERT(sc); 6435 6436 sc->sc_tx_timer = 0; 6437 callout_stop(&sc->sc_timer_to); 6438 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 6439 6440 /* Power OFF hardware. */ 6441 iwn_hw_stop(sc); 6442 } 6443 6444 static void 6445 iwn_stop(struct iwn_softc *sc) 6446 { 6447 IWN_LOCK(sc); 6448 iwn_stop_locked(sc); 6449 IWN_UNLOCK(sc); 6450 } 6451 6452 /* 6453 * Callback from net80211 to start a scan. 6454 */ 6455 static void 6456 iwn_scan_start(struct ieee80211com *ic) 6457 { 6458 struct ifnet *ifp = ic->ic_ifp; 6459 struct iwn_softc *sc = ifp->if_softc; 6460 6461 IWN_LOCK(sc); 6462 /* make the link LED blink while we're scanning */ 6463 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 6464 IWN_UNLOCK(sc); 6465 } 6466 6467 /* 6468 * Callback from net80211 to terminate a scan.
6469 */ 6470 static void 6471 iwn_scan_end(struct ieee80211com *ic) 6472 { 6473 struct ifnet *ifp = ic->ic_ifp; 6474 struct iwn_softc *sc = ifp->if_softc; 6475 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6476 6477 IWN_LOCK(sc); 6478 if (vap->iv_state == IEEE80211_S_RUN) { 6479 /* Set link LED to ON status if we are associated */ 6480 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 6481 } 6482 IWN_UNLOCK(sc); 6483 } 6484 6485 /* 6486 * Callback from net80211 to force a channel change. 6487 */ 6488 static void 6489 iwn_set_channel(struct ieee80211com *ic) 6490 { 6491 const struct ieee80211_channel *c = ic->ic_curchan; 6492 struct ifnet *ifp = ic->ic_ifp; 6493 struct iwn_softc *sc = ifp->if_softc; 6494 6495 IWN_LOCK(sc); 6496 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 6497 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 6498 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 6499 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 6500 IWN_UNLOCK(sc); 6501 } 6502 6503 /* 6504 * Callback from net80211 to start scanning of the current channel. 6505 */ 6506 static void 6507 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 6508 { 6509 struct ieee80211vap *vap = ss->ss_vap; 6510 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 6511 int error; 6512 6513 IWN_LOCK(sc); 6514 error = iwn_scan(sc); 6515 IWN_UNLOCK(sc); 6516 if (error != 0) 6517 ieee80211_cancel_scan(vap); 6518 } 6519 6520 /* 6521 * Callback from net80211 to handle the minimum dwell time being met. 6522 * The intent is to terminate the scan but we just let the firmware 6523 * notify us when it's finished as we have no safe way to abort it. 6524 */ 6525 static void 6526 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 6527 { 6528 /* NB: don't try to abort scan; wait for firmware to finish */ 6529 } 6530 6531 static struct iwn_eeprom_chan * 6532 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) 6533 { 6534 int i, j; 6535 6536 for (j = 0; j < 7; j++) { 6537 for (i = 0; i < iwn_bands[j].nchan; i++) { 6538 if (iwn_bands[j].chan[i] == c->ic_ieee) 6539 return &sc->eeprom_channels[j][i]; 6540 } 6541 } 6542 6543 return NULL; 6544 } 6545 6546 /* 6547 * Enforce flags read from EEPROM. 
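 * Every channel proposed by net80211 must exist in the EEPROM channel
 * tables; the per-channel EEPROM flags (translated by
 * iwn_eeprom_channel_flags()) are OR'ed into the net80211 channel flags.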
6548 */ 6549 static int 6550 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 6551 int nchan, struct ieee80211_channel chans[]) 6552 { 6553 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6554 int i; 6555 6556 for (i = 0; i < nchan; i++) { 6557 struct ieee80211_channel *c = &chans[i]; 6558 struct iwn_eeprom_chan *channel; 6559 6560 channel = iwn_find_eeprom_channel(sc, c); 6561 if (channel == NULL) { 6562 if_printf(ic->ic_ifp, 6563 "%s: invalid channel %u freq %u/0x%x\n", 6564 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 6565 return EINVAL; 6566 } 6567 c->ic_flags |= iwn_eeprom_channel_flags(channel); 6568 } 6569 6570 return 0; 6571 } 6572 6573 static void 6574 iwn_hw_reset(void *arg0, int pending) 6575 { 6576 struct iwn_softc *sc = arg0; 6577 struct ifnet *ifp = sc->sc_ifp; 6578 struct ieee80211com *ic = ifp->if_l2com; 6579 6580 iwn_stop(sc); 6581 iwn_init(sc); 6582 ieee80211_notify_radio(ic, 1); 6583 } 6584 6585 static void 6586 iwn_radio_on(void *arg0, int pending) 6587 { 6588 struct iwn_softc *sc = arg0; 6589 struct ifnet *ifp = sc->sc_ifp; 6590 struct ieee80211com *ic = ifp->if_l2com; 6591 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6592 6593 if (vap != NULL) { 6594 iwn_init(sc); 6595 ieee80211_init(vap); 6596 } 6597 } 6598 6599 static void 6600 iwn_radio_off(void *arg0, int pending) 6601 { 6602 struct iwn_softc *sc = arg0; 6603 struct ifnet *ifp = sc->sc_ifp; 6604 struct ieee80211com *ic = ifp->if_l2com; 6605 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6606 6607 iwn_stop(sc); 6608 if (vap != NULL) 6609 ieee80211_stop(vap); 6610 6611 /* Enable interrupts to get RF toggle notification. */ 6612 IWN_LOCK(sc); 6613 IWN_WRITE(sc, IWN_INT, 0xffffffff); 6614 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 6615 IWN_UNLOCK(sc); 6616 } 6617 6618 static void 6619 iwn_sysctlattach(struct iwn_softc *sc) 6620 { 6621 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 6622 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 6623 6624 #ifdef IWN_DEBUG 6625 sc->sc_debug = 0; 6626 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 6627 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); 6628 #endif 6629 } 6630 6631 static int 6632 iwn_shutdown(device_t dev) 6633 { 6634 struct iwn_softc *sc = device_get_softc(dev); 6635 6636 iwn_stop(sc); 6637 return 0; 6638 } 6639 6640 static int 6641 iwn_suspend(device_t dev) 6642 { 6643 struct iwn_softc *sc = device_get_softc(dev); 6644 struct ifnet *ifp = sc->sc_ifp; 6645 struct ieee80211com *ic = ifp->if_l2com; 6646 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6647 6648 iwn_stop(sc); 6649 if (vap != NULL) 6650 ieee80211_stop(vap); 6651 return 0; 6652 } 6653 6654 static int 6655 iwn_resume(device_t dev) 6656 { 6657 struct iwn_softc *sc = device_get_softc(dev); 6658 struct ifnet *ifp = sc->sc_ifp; 6659 struct ieee80211com *ic = ifp->if_l2com; 6660 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6661 6662 /* Clear device-specific "PCI retry timeout" register (41h). 
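 * The same workaround is applied at attach time; it is repeated here
 * because the register contents may not survive suspend.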
*/ 6663 pci_write_config(dev, 0x41, 0, 1); 6664 6665 if (ifp->if_flags & IFF_UP) { 6666 iwn_init(sc); 6667 if (vap != NULL) 6668 ieee80211_init(vap); 6669 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 6670 iwn_start(ifp); 6671 } 6672 return 0; 6673 } 6674 6675 #ifdef IWN_DEBUG 6676 static const char * 6677 iwn_intr_str(uint8_t cmd) 6678 { 6679 switch (cmd) { 6680 /* Notifications */ 6681 case IWN_UC_READY: return "UC_READY"; 6682 case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE"; 6683 case IWN_TX_DONE: return "TX_DONE"; 6684 case IWN_START_SCAN: return "START_SCAN"; 6685 case IWN_STOP_SCAN: return "STOP_SCAN"; 6686 case IWN_RX_STATISTICS: return "RX_STATS"; 6687 case IWN_BEACON_STATISTICS: return "BEACON_STATS"; 6688 case IWN_STATE_CHANGED: return "STATE_CHANGED"; 6689 case IWN_BEACON_MISSED: return "BEACON_MISSED"; 6690 case IWN_RX_PHY: return "RX_PHY"; 6691 case IWN_MPDU_RX_DONE: return "MPDU_RX_DONE"; 6692 case IWN_RX_DONE: return "RX_DONE"; 6693 6694 /* Command Notifications */ 6695 case IWN_CMD_RXON: return "IWN_CMD_RXON"; 6696 case IWN_CMD_RXON_ASSOC: return "IWN_CMD_RXON_ASSOC"; 6697 case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS"; 6698 case IWN_CMD_TIMING: return "IWN_CMD_TIMING"; 6699 case IWN_CMD_LINK_QUALITY: return "IWN_CMD_LINK_QUALITY"; 6700 case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED"; 6701 case IWN5000_CMD_WIMAX_COEX: return "IWN5000_CMD_WIMAX_COEX"; 6702 case IWN5000_CMD_CALIB_CONFIG: return "IWN5000_CMD_CALIB_CONFIG"; 6703 case IWN5000_CMD_CALIB_RESULT: return "IWN5000_CMD_CALIB_RESULT"; 6704 case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE"; 6705 case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE"; 6706 case IWN_CMD_SCAN: return "IWN_CMD_SCAN"; 6707 case IWN_CMD_SCAN_RESULTS: return "IWN_CMD_SCAN_RESULTS"; 6708 case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER"; 6709 case IWN_CMD_TXPOWER_DBM: return "IWN_CMD_TXPOWER_DBM"; 6710 case IWN5000_CMD_TX_ANT_CONFIG: return "IWN5000_CMD_TX_ANT_CONFIG"; 6711 case IWN_CMD_BT_COEX: return "IWN_CMD_BT_COEX"; 6712 case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP"; 6713 case IWN_CMD_SET_SENSITIVITY: return "IWN_CMD_SET_SENSITIVITY"; 6714 case IWN_CMD_PHY_CALIB: return "IWN_CMD_PHY_CALIB"; 6715 } 6716 return "UNKNOWN INTR NOTIF/CMD"; 6717 } 6718 #endif /* IWN_DEBUG */ 6719 6720 static device_method_t iwn_methods[] = { 6721 /* Device interface */ 6722 DEVMETHOD(device_probe, iwn_probe), 6723 DEVMETHOD(device_attach, iwn_attach), 6724 DEVMETHOD(device_detach, iwn_detach), 6725 DEVMETHOD(device_shutdown, iwn_shutdown), 6726 DEVMETHOD(device_suspend, iwn_suspend), 6727 DEVMETHOD(device_resume, iwn_resume), 6728 { 0, 0 } 6729 }; 6730 6731 static driver_t iwn_driver = { 6732 "iwn", 6733 iwn_methods, 6734 sizeof (struct iwn_softc) 6735 }; 6736 static devclass_t iwn_devclass; 6737 6738 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0); 6739 MODULE_DEPEND(iwn, pci, 1, 1, 1); 6740 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 6741 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 6742