1 /*- 2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr> 3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org> 4 * Copyright (c) 2008 Sam Leffler, Errno Consulting 5 * Copyright (c) 2011 Intel Corporation 6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr> 7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org> 8 * 9 * Permission to use, copy, modify, and distribute this software for any 10 * purpose with or without fee is hereby granted, provided that the above 11 * copyright notice and this permission notice appear in all copies. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 24 * adapters. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwn.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>
#include <dev/iwn/if_iwn_devid.h>
#include <dev/iwn/if_iwn_chip_cfg.h>
#include <dev/iwn/if_iwn_debug.h>
#include <dev/iwn/if_iwn_ioctl.h>

/*
 * One entry of the PCI probe table: a vendor/device id pair and the
 * human-readable adapter name installed as the device description.
 */
struct iwn_ident {
	uint16_t	vendor;		/* PCI vendor id (0x8086 = Intel) */
	uint16_t	device;		/* PCI device id */
	const char	*name;		/* marketing name; NULL terminates table */
};

/*
 * Supported adapters, matched by vendor/device id in iwn_probe().
 * Note: subdevice-id distinctions (e.g. 2200 vs 2200D) cannot be
 * expressed here; they are resolved later in iwn_config_specific().
 */
static const struct iwn_ident iwn_ident_table[] = {
	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
	{ 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
	/* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" },
	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" },
	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" },
	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" },
	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" },
	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" },
	{ 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105" },
	{ 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105" },
	{ 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" },
	{ 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" },
	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" },
	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" },
	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" },
	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" },
	{ 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" },
	{ 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" },
	{ 0, 0, NULL }		/* sentinel: end of table */
};

/* Device attachment / teardown. */
static int	iwn_probe(device_t);
static int	iwn_attach(device_t);
static int	iwn4965_attach(struct iwn_softc *, uint16_t);
static int	iwn5000_attach(struct iwn_softc *, uint16_t);
static int	iwn_config_specific(struct iwn_softc *, uint16_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static void	iwn_sysctlattach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_detach(device_t);
static int	iwn_shutdown(device_t);
static int	iwn_suspend(device_t);
static int	iwn_resume(device_t);
/* NIC / EEPROM access and DMA resource management. */
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
/* EEPROM parsing (MAC address, regulatory domain, channel lists). */
static int	iwn_read_eeprom(struct iwn_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static void	iwn4965_read_eeprom(struct iwn_softc *);
#ifdef IWN_DEBUG
static void	iwn4965_print_power_group(struct iwn_softc *, int);
#endif
static void	iwn5000_read_eeprom(struct iwn_softc *);
static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
static void	iwn_read_eeprom_band(struct iwn_softc *, int);
static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
		    struct ieee80211_channel *);
static int	iwn_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel[]);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
/* net80211 state machine and node management. */
static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwn_calib_timeout(void *);
/* RX-side notification handlers. */
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_rx_calib_results(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
/* TX completion and interrupt handling. */
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_rftoggle_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static void	iwn_intr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
/* TX path. */
static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *);
static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *,
		    const struct ieee80211_bpf_params *params);
static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static void	iwn_start(struct ifnet *);
static void	iwn_start_locked(struct ifnet *);
static void	iwn_watchdog(void *);
static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
/* Firmware command interface and per-generation node/power helpers. */
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *,
		    struct ieee80211_node *);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static int	iwn_updateedca(struct ieee80211com *);
static void	iwn_update_mcast(struct ifnet *);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn5000_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
/* Sensitivity calibration. */
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static void	iwn_save_stats_counters(struct iwn_softc *,
		    const struct iwn_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static void	iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn_send_btcoex(struct iwn_softc *);
static int	iwn_send_advanced_btcoex(struct iwn_softc *);
static int	iwn5000_runtime_calib(struct iwn_softc *);
static int	iwn_config(struct iwn_softc *);
/* Scanning, association, A-MPDU aggregation. */
static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
static int	iwn_scan(struct iwn_softc *, struct ieee80211vap *,
		    struct ieee80211_scan_state *, struct ieee80211_channel *);
static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_ampdu_rx_start(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *, int, int, int);
static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *);
static int	iwn_addba_request(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_addba_response(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
/* 5000-series calibration and firmware loading. */
static int	iwn5000_query_calibration(struct iwn_softc *);
static int	iwn5000_send_calibration(struct iwn_softc *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn5000_crystal_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calibv2(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
/* Low-level hardware bring-up / shutdown. */
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static void	iwn_radio_on(void *, int);
static void	iwn_radio_off(void *, int);
static void	iwn_panicked(void *, int);
static void	iwn_init_locked(struct iwn_softc *);
static void	iwn_init(void *);
static void	iwn_stop_locked(struct iwn_softc *);
static void	iwn_stop(struct iwn_softc *);
/* net80211 scan callbacks. */
static void	iwn_scan_start(struct ieee80211com *);
static void	iwn_scan_end(struct ieee80211com *);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
static void	iwn_hw_reset(void *, int);
#ifdef IWN_DEBUG
static char	*iwn_get_csr_string(int);
static void	iwn_debug_register(struct iwn_softc *);
#endif

static device_method_t iwn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwn_probe),
	DEVMETHOD(device_attach,	iwn_attach),
DEVMETHOD(device_detach, iwn_detach), 358 DEVMETHOD(device_shutdown, iwn_shutdown), 359 DEVMETHOD(device_suspend, iwn_suspend), 360 DEVMETHOD(device_resume, iwn_resume), 361 362 DEVMETHOD_END 363 }; 364 365 static driver_t iwn_driver = { 366 "iwn", 367 iwn_methods, 368 sizeof(struct iwn_softc) 369 }; 370 static devclass_t iwn_devclass; 371 372 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); 373 374 MODULE_VERSION(iwn, 1); 375 376 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 377 MODULE_DEPEND(iwn, pci, 1, 1, 1); 378 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 379 380 static int 381 iwn_probe(device_t dev) 382 { 383 const struct iwn_ident *ident; 384 385 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 386 if (pci_get_vendor(dev) == ident->vendor && 387 pci_get_device(dev) == ident->device) { 388 device_set_desc(dev, ident->name); 389 return (BUS_PROBE_DEFAULT); 390 } 391 } 392 return ENXIO; 393 } 394 395 static int 396 iwn_attach(device_t dev) 397 { 398 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 399 struct ieee80211com *ic; 400 struct ifnet *ifp; 401 int i, error, rid; 402 uint8_t macaddr[IEEE80211_ADDR_LEN]; 403 404 sc->sc_dev = dev; 405 406 #ifdef IWN_DEBUG 407 error = resource_int_value(device_get_name(sc->sc_dev), 408 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 409 if (error != 0) 410 sc->sc_debug = 0; 411 #else 412 sc->sc_debug = 0; 413 #endif 414 415 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); 416 417 /* 418 * Get the offset of the PCI Express Capability Structure in PCI 419 * Configuration Space. 420 */ 421 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 422 if (error != 0) { 423 device_printf(dev, "PCIe capability structure not found!\n"); 424 return error; 425 } 426 427 /* Clear device-specific "PCI retry timeout" register (41h). */ 428 pci_write_config(dev, 0x41, 0, 1); 429 430 /* Enable bus-mastering. 
*/ 431 pci_enable_busmaster(dev); 432 433 rid = PCIR_BAR(0); 434 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 435 RF_ACTIVE); 436 if (sc->mem == NULL) { 437 device_printf(dev, "can't map mem space\n"); 438 error = ENOMEM; 439 return error; 440 } 441 sc->sc_st = rman_get_bustag(sc->mem); 442 sc->sc_sh = rman_get_bushandle(sc->mem); 443 444 i = 1; 445 rid = 0; 446 if (pci_alloc_msi(dev, &i) == 0) 447 rid = 1; 448 /* Install interrupt handler. */ 449 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 450 (rid != 0 ? 0 : RF_SHAREABLE)); 451 if (sc->irq == NULL) { 452 device_printf(dev, "can't map interrupt\n"); 453 error = ENOMEM; 454 goto fail; 455 } 456 457 IWN_LOCK_INIT(sc); 458 459 /* Read hardware revision and attach. */ 460 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) 461 & IWN_HW_REV_TYPE_MASK; 462 sc->subdevice_id = pci_get_subdevice(dev); 463 464 /* 465 * 4965 versus 5000 and later have different methods. 466 * Let's set those up first. 467 */ 468 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 469 error = iwn4965_attach(sc, pci_get_device(dev)); 470 else 471 error = iwn5000_attach(sc, pci_get_device(dev)); 472 if (error != 0) { 473 device_printf(dev, "could not attach device, error %d\n", 474 error); 475 goto fail; 476 } 477 478 /* 479 * Next, let's setup the various parameters of each NIC. 480 */ 481 error = iwn_config_specific(sc, pci_get_device(dev)); 482 if (error != 0) { 483 device_printf(dev, "could not attach device, error %d\n", 484 error); 485 goto fail; 486 } 487 488 if ((error = iwn_hw_prepare(sc)) != 0) { 489 device_printf(dev, "hardware not ready, error %d\n", error); 490 goto fail; 491 } 492 493 /* Allocate DMA memory for firmware transfers. */ 494 if ((error = iwn_alloc_fwmem(sc)) != 0) { 495 device_printf(dev, 496 "could not allocate memory for firmware, error %d\n", 497 error); 498 goto fail; 499 } 500 501 /* Allocate "Keep Warm" page. 
*/ 502 if ((error = iwn_alloc_kw(sc)) != 0) { 503 device_printf(dev, 504 "could not allocate keep warm page, error %d\n", error); 505 goto fail; 506 } 507 508 /* Allocate ICT table for 5000 Series. */ 509 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 510 (error = iwn_alloc_ict(sc)) != 0) { 511 device_printf(dev, "could not allocate ICT table, error %d\n", 512 error); 513 goto fail; 514 } 515 516 /* Allocate TX scheduler "rings". */ 517 if ((error = iwn_alloc_sched(sc)) != 0) { 518 device_printf(dev, 519 "could not allocate TX scheduler rings, error %d\n", error); 520 goto fail; 521 } 522 523 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 524 for (i = 0; i < sc->ntxqs; i++) { 525 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 526 device_printf(dev, 527 "could not allocate TX ring %d, error %d\n", i, 528 error); 529 goto fail; 530 } 531 } 532 533 /* Allocate RX ring. */ 534 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 535 device_printf(dev, "could not allocate RX ring, error %d\n", 536 error); 537 goto fail; 538 } 539 540 /* Clear pending interrupts. */ 541 IWN_WRITE(sc, IWN_INT, 0xffffffff); 542 543 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 544 if (ifp == NULL) { 545 device_printf(dev, "can not allocate ifnet structure\n"); 546 goto fail; 547 } 548 549 ic = ifp->if_l2com; 550 ic->ic_ifp = ifp; 551 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 552 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 553 554 /* Set device capabilities. 
*/ 555 ic->ic_caps = 556 IEEE80211_C_STA /* station mode supported */ 557 | IEEE80211_C_MONITOR /* monitor mode supported */ 558 | IEEE80211_C_BGSCAN /* background scanning */ 559 | IEEE80211_C_TXPMGT /* tx power management */ 560 | IEEE80211_C_SHSLOT /* short slot time supported */ 561 | IEEE80211_C_WPA 562 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 563 #if 0 564 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 565 #endif 566 | IEEE80211_C_WME /* WME */ 567 | IEEE80211_C_PMGT /* Station-side power mgmt */ 568 ; 569 570 /* Read MAC address, channels, etc from EEPROM. */ 571 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 572 device_printf(dev, "could not read EEPROM, error %d\n", 573 error); 574 goto fail; 575 } 576 577 /* Count the number of available chains. */ 578 sc->ntxchains = 579 ((sc->txchainmask >> 2) & 1) + 580 ((sc->txchainmask >> 1) & 1) + 581 ((sc->txchainmask >> 0) & 1); 582 sc->nrxchains = 583 ((sc->rxchainmask >> 2) & 1) + 584 ((sc->rxchainmask >> 1) & 1) + 585 ((sc->rxchainmask >> 0) & 1); 586 if (bootverbose) { 587 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", 588 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 589 macaddr, ":"); 590 } 591 592 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 593 ic->ic_rxstream = sc->nrxchains; 594 ic->ic_txstream = sc->ntxchains; 595 596 /* 597 * The NICs we currently support cap out at 2x2 support 598 * separate from the chains being used. 599 * 600 * This is a total hack to work around that until some 601 * per-device method is implemented to return the 602 * actual stream support. 603 * 604 * XXX Note: the 5350 is a 3x3 device; so we shouldn't 605 * cap this! But, anything that touches rates in the 606 * driver needs to be audited first before 3x3 is enabled. 
607 */ 608 if (ic->ic_rxstream > 2) 609 ic->ic_rxstream = 2; 610 if (ic->ic_txstream > 2) 611 ic->ic_txstream = 2; 612 613 ic->ic_htcaps = 614 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 615 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 616 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 617 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 618 #ifdef notyet 619 | IEEE80211_HTCAP_GREENFIELD 620 #if IWN_RBUF_SIZE == 8192 621 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 622 #else 623 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 624 #endif 625 #endif 626 /* s/w capabilities */ 627 | IEEE80211_HTC_HT /* HT operation */ 628 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 629 #ifdef notyet 630 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 631 #endif 632 ; 633 } 634 635 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 636 ifp->if_softc = sc; 637 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 638 ifp->if_init = iwn_init; 639 ifp->if_ioctl = iwn_ioctl; 640 ifp->if_start = iwn_start; 641 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 642 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 643 IFQ_SET_READY(&ifp->if_snd); 644 645 ieee80211_ifattach(ic, macaddr); 646 ic->ic_vap_create = iwn_vap_create; 647 ic->ic_vap_delete = iwn_vap_delete; 648 ic->ic_raw_xmit = iwn_raw_xmit; 649 ic->ic_node_alloc = iwn_node_alloc; 650 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 651 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 652 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 653 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 654 sc->sc_addba_request = ic->ic_addba_request; 655 ic->ic_addba_request = iwn_addba_request; 656 sc->sc_addba_response = ic->ic_addba_response; 657 ic->ic_addba_response = iwn_addba_response; 658 sc->sc_addba_stop = ic->ic_addba_stop; 659 ic->ic_addba_stop = iwn_ampdu_tx_stop; 660 ic->ic_newassoc = iwn_newassoc; 661 ic->ic_wme.wme_update = iwn_updateedca; 662 ic->ic_update_mcast = iwn_update_mcast; 663 ic->ic_scan_start = iwn_scan_start; 664 
ic->ic_scan_end = iwn_scan_end; 665 ic->ic_set_channel = iwn_set_channel; 666 ic->ic_scan_curchan = iwn_scan_curchan; 667 ic->ic_scan_mindwell = iwn_scan_mindwell; 668 ic->ic_setregdomain = iwn_setregdomain; 669 670 iwn_radiotap_attach(sc); 671 672 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); 673 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); 674 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc); 675 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); 676 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); 677 TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc); 678 679 sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK, 680 taskqueue_thread_enqueue, &sc->sc_tq); 681 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq"); 682 if (error != 0) { 683 device_printf(dev, "can't start threads, error %d\n", error); 684 goto fail; 685 } 686 687 iwn_sysctlattach(sc); 688 689 /* 690 * Hook our interrupt after all initialization is complete. 691 */ 692 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 693 NULL, iwn_intr, sc, &sc->sc_ih); 694 if (error != 0) { 695 device_printf(dev, "can't establish interrupt, error %d\n", 696 error); 697 goto fail; 698 } 699 700 #if 0 701 device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n", 702 __func__, 703 sizeof(struct iwn_stats), 704 sizeof(struct iwn_stats_bt)); 705 #endif 706 707 if (bootverbose) 708 ieee80211_announce(ic); 709 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 710 return 0; 711 fail: 712 iwn_detach(dev); 713 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 714 return error; 715 } 716 717 /* 718 * Define specific configuration based on device id and subdevice id 719 * pid : PCI device id 720 */ 721 static int 722 iwn_config_specific(struct iwn_softc *sc, uint16_t pid) 723 { 724 725 switch (pid) { 726 /* 4965 series */ 727 case IWN_DID_4965_1: 728 case IWN_DID_4965_2: 729 case IWN_DID_4965_3: 730 case IWN_DID_4965_4: 731 sc->base_params = 
&iwn4965_base_params; 732 sc->limits = &iwn4965_sensitivity_limits; 733 sc->fwname = "iwn4965fw"; 734 /* Override chains masks, ROM is known to be broken. */ 735 sc->txchainmask = IWN_ANT_AB; 736 sc->rxchainmask = IWN_ANT_ABC; 737 /* Enable normal btcoex */ 738 sc->sc_flags |= IWN_FLAG_BTCOEX; 739 break; 740 /* 1000 Series */ 741 case IWN_DID_1000_1: 742 case IWN_DID_1000_2: 743 switch(sc->subdevice_id) { 744 case IWN_SDID_1000_1: 745 case IWN_SDID_1000_2: 746 case IWN_SDID_1000_3: 747 case IWN_SDID_1000_4: 748 case IWN_SDID_1000_5: 749 case IWN_SDID_1000_6: 750 case IWN_SDID_1000_7: 751 case IWN_SDID_1000_8: 752 case IWN_SDID_1000_9: 753 case IWN_SDID_1000_10: 754 case IWN_SDID_1000_11: 755 case IWN_SDID_1000_12: 756 sc->limits = &iwn1000_sensitivity_limits; 757 sc->base_params = &iwn1000_base_params; 758 sc->fwname = "iwn1000fw"; 759 break; 760 default: 761 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 762 "0x%04x rev %d not supported (subdevice)\n", pid, 763 sc->subdevice_id,sc->hw_type); 764 return ENOTSUP; 765 } 766 break; 767 /* 6x00 Series */ 768 case IWN_DID_6x00_2: 769 case IWN_DID_6x00_4: 770 case IWN_DID_6x00_1: 771 case IWN_DID_6x00_3: 772 sc->fwname = "iwn6000fw"; 773 sc->limits = &iwn6000_sensitivity_limits; 774 switch(sc->subdevice_id) { 775 case IWN_SDID_6x00_1: 776 case IWN_SDID_6x00_2: 777 case IWN_SDID_6x00_8: 778 //iwl6000_3agn_cfg 779 sc->base_params = &iwn_6000_base_params; 780 break; 781 case IWN_SDID_6x00_3: 782 case IWN_SDID_6x00_6: 783 case IWN_SDID_6x00_9: 784 ////iwl6000i_2agn 785 case IWN_SDID_6x00_4: 786 case IWN_SDID_6x00_7: 787 case IWN_SDID_6x00_10: 788 //iwl6000i_2abg_cfg 789 case IWN_SDID_6x00_5: 790 //iwl6000i_2bg_cfg 791 sc->base_params = &iwn_6000i_base_params; 792 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 793 sc->txchainmask = IWN_ANT_BC; 794 sc->rxchainmask = IWN_ANT_BC; 795 break; 796 default: 797 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 798 "0x%04x rev %d not supported (subdevice)\n", pid, 
799 sc->subdevice_id,sc->hw_type); 800 return ENOTSUP; 801 } 802 break; 803 /* 6x05 Series */ 804 case IWN_DID_6x05_1: 805 case IWN_DID_6x05_2: 806 switch(sc->subdevice_id) { 807 case IWN_SDID_6x05_1: 808 case IWN_SDID_6x05_4: 809 case IWN_SDID_6x05_6: 810 //iwl6005_2agn_cfg 811 case IWN_SDID_6x05_2: 812 case IWN_SDID_6x05_5: 813 case IWN_SDID_6x05_7: 814 //iwl6005_2abg_cfg 815 case IWN_SDID_6x05_3: 816 //iwl6005_2bg_cfg 817 case IWN_SDID_6x05_8: 818 case IWN_SDID_6x05_9: 819 //iwl6005_2agn_sff_cfg 820 case IWN_SDID_6x05_10: 821 //iwl6005_2agn_d_cfg 822 case IWN_SDID_6x05_11: 823 //iwl6005_2agn_mow1_cfg 824 case IWN_SDID_6x05_12: 825 //iwl6005_2agn_mow2_cfg 826 sc->fwname = "iwn6000g2afw"; 827 sc->limits = &iwn6000_sensitivity_limits; 828 sc->base_params = &iwn_6000g2_base_params; 829 break; 830 default: 831 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 832 "0x%04x rev %d not supported (subdevice)\n", pid, 833 sc->subdevice_id,sc->hw_type); 834 return ENOTSUP; 835 } 836 break; 837 /* 6x35 Series */ 838 case IWN_DID_6035_1: 839 case IWN_DID_6035_2: 840 switch(sc->subdevice_id) { 841 case IWN_SDID_6035_1: 842 case IWN_SDID_6035_2: 843 case IWN_SDID_6035_3: 844 case IWN_SDID_6035_4: 845 sc->fwname = "iwn6000g2bfw"; 846 sc->limits = &iwn6235_sensitivity_limits; 847 sc->base_params = &iwn_6235_base_params; 848 break; 849 default: 850 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 851 "0x%04x rev %d not supported (subdevice)\n", pid, 852 sc->subdevice_id,sc->hw_type); 853 return ENOTSUP; 854 } 855 break; 856 /* 6x50 WiFi/WiMax Series */ 857 case IWN_DID_6050_1: 858 case IWN_DID_6050_2: 859 switch(sc->subdevice_id) { 860 case IWN_SDID_6050_1: 861 case IWN_SDID_6050_3: 862 case IWN_SDID_6050_5: 863 //iwl6050_2agn_cfg 864 case IWN_SDID_6050_2: 865 case IWN_SDID_6050_4: 866 case IWN_SDID_6050_6: 867 //iwl6050_2abg_cfg 868 sc->fwname = "iwn6050fw"; 869 sc->txchainmask = IWN_ANT_AB; 870 sc->rxchainmask = IWN_ANT_AB; 871 sc->limits = 
&iwn6000_sensitivity_limits; 872 sc->base_params = &iwn_6050_base_params; 873 break; 874 default: 875 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 876 "0x%04x rev %d not supported (subdevice)\n", pid, 877 sc->subdevice_id,sc->hw_type); 878 return ENOTSUP; 879 } 880 break; 881 /* 6150 WiFi/WiMax Series */ 882 case IWN_DID_6150_1: 883 case IWN_DID_6150_2: 884 switch(sc->subdevice_id) { 885 case IWN_SDID_6150_1: 886 case IWN_SDID_6150_3: 887 case IWN_SDID_6150_5: 888 // iwl6150_bgn_cfg 889 case IWN_SDID_6150_2: 890 case IWN_SDID_6150_4: 891 case IWN_SDID_6150_6: 892 //iwl6150_bg_cfg 893 sc->fwname = "iwn6050fw"; 894 sc->limits = &iwn6000_sensitivity_limits; 895 sc->base_params = &iwn_6150_base_params; 896 break; 897 default: 898 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 899 "0x%04x rev %d not supported (subdevice)\n", pid, 900 sc->subdevice_id,sc->hw_type); 901 return ENOTSUP; 902 } 903 break; 904 /* 6030 Series and 1030 Series */ 905 case IWN_DID_x030_1: 906 case IWN_DID_x030_2: 907 case IWN_DID_x030_3: 908 case IWN_DID_x030_4: 909 switch(sc->subdevice_id) { 910 case IWN_SDID_x030_1: 911 case IWN_SDID_x030_3: 912 case IWN_SDID_x030_5: 913 // iwl1030_bgn_cfg 914 case IWN_SDID_x030_2: 915 case IWN_SDID_x030_4: 916 case IWN_SDID_x030_6: 917 //iwl1030_bg_cfg 918 case IWN_SDID_x030_7: 919 case IWN_SDID_x030_10: 920 case IWN_SDID_x030_14: 921 //iwl6030_2agn_cfg 922 case IWN_SDID_x030_8: 923 case IWN_SDID_x030_11: 924 case IWN_SDID_x030_15: 925 // iwl6030_2bgn_cfg 926 case IWN_SDID_x030_9: 927 case IWN_SDID_x030_12: 928 case IWN_SDID_x030_16: 929 // iwl6030_2abg_cfg 930 case IWN_SDID_x030_13: 931 //iwl6030_2bg_cfg 932 sc->fwname = "iwn6000g2bfw"; 933 sc->limits = &iwn6000_sensitivity_limits; 934 sc->base_params = &iwn_6000g2b_base_params; 935 break; 936 default: 937 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 938 "0x%04x rev %d not supported (subdevice)\n", pid, 939 sc->subdevice_id,sc->hw_type); 940 return ENOTSUP; 941 
} 942 break; 943 /* 130 Series WiFi */ 944 /* XXX: This series will need adjustment for rate. 945 * see rx_with_siso_diversity in linux kernel 946 */ 947 case IWN_DID_130_1: 948 case IWN_DID_130_2: 949 switch(sc->subdevice_id) { 950 case IWN_SDID_130_1: 951 case IWN_SDID_130_3: 952 case IWN_SDID_130_5: 953 //iwl130_bgn_cfg 954 case IWN_SDID_130_2: 955 case IWN_SDID_130_4: 956 case IWN_SDID_130_6: 957 //iwl130_bg_cfg 958 sc->fwname = "iwn6000g2bfw"; 959 sc->limits = &iwn6000_sensitivity_limits; 960 sc->base_params = &iwn_6000g2b_base_params; 961 break; 962 default: 963 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 964 "0x%04x rev %d not supported (subdevice)\n", pid, 965 sc->subdevice_id,sc->hw_type); 966 return ENOTSUP; 967 } 968 break; 969 /* 100 Series WiFi */ 970 case IWN_DID_100_1: 971 case IWN_DID_100_2: 972 switch(sc->subdevice_id) { 973 case IWN_SDID_100_1: 974 case IWN_SDID_100_2: 975 case IWN_SDID_100_3: 976 case IWN_SDID_100_4: 977 case IWN_SDID_100_5: 978 case IWN_SDID_100_6: 979 sc->limits = &iwn1000_sensitivity_limits; 980 sc->base_params = &iwn1000_base_params; 981 sc->fwname = "iwn100fw"; 982 break; 983 default: 984 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 985 "0x%04x rev %d not supported (subdevice)\n", pid, 986 sc->subdevice_id,sc->hw_type); 987 return ENOTSUP; 988 } 989 break; 990 991 /* 105 Series */ 992 /* XXX: This series will need adjustment for rate. 
993 * see rx_with_siso_diversity in linux kernel 994 */ 995 case IWN_DID_105_1: 996 case IWN_DID_105_2: 997 switch(sc->subdevice_id) { 998 case IWN_SDID_105_1: 999 case IWN_SDID_105_2: 1000 case IWN_SDID_105_3: 1001 //iwl105_bgn_cfg 1002 case IWN_SDID_105_4: 1003 //iwl105_bgn_d_cfg 1004 sc->limits = &iwn2030_sensitivity_limits; 1005 sc->base_params = &iwn2000_base_params; 1006 sc->fwname = "iwn105fw"; 1007 break; 1008 default: 1009 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1010 "0x%04x rev %d not supported (subdevice)\n", pid, 1011 sc->subdevice_id,sc->hw_type); 1012 return ENOTSUP; 1013 } 1014 break; 1015 1016 /* 135 Series */ 1017 /* XXX: This series will need adjustment for rate. 1018 * see rx_with_siso_diversity in linux kernel 1019 */ 1020 case IWN_DID_135_1: 1021 case IWN_DID_135_2: 1022 switch(sc->subdevice_id) { 1023 case IWN_SDID_135_1: 1024 case IWN_SDID_135_2: 1025 case IWN_SDID_135_3: 1026 sc->limits = &iwn2030_sensitivity_limits; 1027 sc->base_params = &iwn2030_base_params; 1028 sc->fwname = "iwn135fw"; 1029 break; 1030 default: 1031 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1032 "0x%04x rev %d not supported (subdevice)\n", pid, 1033 sc->subdevice_id,sc->hw_type); 1034 return ENOTSUP; 1035 } 1036 break; 1037 1038 /* 2x00 Series */ 1039 case IWN_DID_2x00_1: 1040 case IWN_DID_2x00_2: 1041 switch(sc->subdevice_id) { 1042 case IWN_SDID_2x00_1: 1043 case IWN_SDID_2x00_2: 1044 case IWN_SDID_2x00_3: 1045 //iwl2000_2bgn_cfg 1046 case IWN_SDID_2x00_4: 1047 //iwl2000_2bgn_d_cfg 1048 sc->limits = &iwn2030_sensitivity_limits; 1049 sc->base_params = &iwn2000_base_params; 1050 sc->fwname = "iwn2000fw"; 1051 break; 1052 default: 1053 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1054 "0x%04x rev %d not supported (subdevice) \n", 1055 pid, sc->subdevice_id, sc->hw_type); 1056 return ENOTSUP; 1057 } 1058 break; 1059 /* 2x30 Series */ 1060 case IWN_DID_2x30_1: 1061 case IWN_DID_2x30_2: 1062 switch(sc->subdevice_id) 
{ 1063 case IWN_SDID_2x30_1: 1064 case IWN_SDID_2x30_3: 1065 case IWN_SDID_2x30_5: 1066 //iwl100_bgn_cfg 1067 case IWN_SDID_2x30_2: 1068 case IWN_SDID_2x30_4: 1069 case IWN_SDID_2x30_6: 1070 //iwl100_bg_cfg 1071 sc->limits = &iwn2030_sensitivity_limits; 1072 sc->base_params = &iwn2030_base_params; 1073 sc->fwname = "iwn2030fw"; 1074 break; 1075 default: 1076 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1077 "0x%04x rev %d not supported (subdevice)\n", pid, 1078 sc->subdevice_id,sc->hw_type); 1079 return ENOTSUP; 1080 } 1081 break; 1082 /* 5x00 Series */ 1083 case IWN_DID_5x00_1: 1084 case IWN_DID_5x00_2: 1085 case IWN_DID_5x00_3: 1086 case IWN_DID_5x00_4: 1087 sc->limits = &iwn5000_sensitivity_limits; 1088 sc->base_params = &iwn5000_base_params; 1089 sc->fwname = "iwn5000fw"; 1090 switch(sc->subdevice_id) { 1091 case IWN_SDID_5x00_1: 1092 case IWN_SDID_5x00_2: 1093 case IWN_SDID_5x00_3: 1094 case IWN_SDID_5x00_4: 1095 case IWN_SDID_5x00_9: 1096 case IWN_SDID_5x00_10: 1097 case IWN_SDID_5x00_11: 1098 case IWN_SDID_5x00_12: 1099 case IWN_SDID_5x00_17: 1100 case IWN_SDID_5x00_18: 1101 case IWN_SDID_5x00_19: 1102 case IWN_SDID_5x00_20: 1103 //iwl5100_agn_cfg 1104 sc->txchainmask = IWN_ANT_B; 1105 sc->rxchainmask = IWN_ANT_AB; 1106 break; 1107 case IWN_SDID_5x00_5: 1108 case IWN_SDID_5x00_6: 1109 case IWN_SDID_5x00_13: 1110 case IWN_SDID_5x00_14: 1111 case IWN_SDID_5x00_21: 1112 case IWN_SDID_5x00_22: 1113 //iwl5100_bgn_cfg 1114 sc->txchainmask = IWN_ANT_B; 1115 sc->rxchainmask = IWN_ANT_AB; 1116 break; 1117 case IWN_SDID_5x00_7: 1118 case IWN_SDID_5x00_8: 1119 case IWN_SDID_5x00_15: 1120 case IWN_SDID_5x00_16: 1121 case IWN_SDID_5x00_23: 1122 case IWN_SDID_5x00_24: 1123 //iwl5100_abg_cfg 1124 sc->txchainmask = IWN_ANT_B; 1125 sc->rxchainmask = IWN_ANT_AB; 1126 break; 1127 case IWN_SDID_5x00_25: 1128 case IWN_SDID_5x00_26: 1129 case IWN_SDID_5x00_27: 1130 case IWN_SDID_5x00_28: 1131 case IWN_SDID_5x00_29: 1132 case IWN_SDID_5x00_30: 1133 case 
IWN_SDID_5x00_31: 1134 case IWN_SDID_5x00_32: 1135 case IWN_SDID_5x00_33: 1136 case IWN_SDID_5x00_34: 1137 case IWN_SDID_5x00_35: 1138 case IWN_SDID_5x00_36: 1139 //iwl5300_agn_cfg 1140 sc->txchainmask = IWN_ANT_ABC; 1141 sc->rxchainmask = IWN_ANT_ABC; 1142 break; 1143 default: 1144 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1145 "0x%04x rev %d not supported (subdevice)\n", pid, 1146 sc->subdevice_id,sc->hw_type); 1147 return ENOTSUP; 1148 } 1149 break; 1150 /* 5x50 Series */ 1151 case IWN_DID_5x50_1: 1152 case IWN_DID_5x50_2: 1153 case IWN_DID_5x50_3: 1154 case IWN_DID_5x50_4: 1155 sc->limits = &iwn5000_sensitivity_limits; 1156 sc->base_params = &iwn5000_base_params; 1157 sc->fwname = "iwn5000fw"; 1158 switch(sc->subdevice_id) { 1159 case IWN_SDID_5x50_1: 1160 case IWN_SDID_5x50_2: 1161 case IWN_SDID_5x50_3: 1162 //iwl5350_agn_cfg 1163 sc->limits = &iwn5000_sensitivity_limits; 1164 sc->base_params = &iwn5000_base_params; 1165 sc->fwname = "iwn5000fw"; 1166 break; 1167 case IWN_SDID_5x50_4: 1168 case IWN_SDID_5x50_5: 1169 case IWN_SDID_5x50_8: 1170 case IWN_SDID_5x50_9: 1171 case IWN_SDID_5x50_10: 1172 case IWN_SDID_5x50_11: 1173 //iwl5150_agn_cfg 1174 case IWN_SDID_5x50_6: 1175 case IWN_SDID_5x50_7: 1176 case IWN_SDID_5x50_12: 1177 case IWN_SDID_5x50_13: 1178 //iwl5150_abg_cfg 1179 sc->limits = &iwn5000_sensitivity_limits; 1180 sc->fwname = "iwn5150fw"; 1181 sc->base_params = &iwn_5x50_base_params; 1182 break; 1183 default: 1184 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1185 "0x%04x rev %d not supported (subdevice)\n", pid, 1186 sc->subdevice_id,sc->hw_type); 1187 return ENOTSUP; 1188 } 1189 break; 1190 default: 1191 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" 1192 "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, 1193 sc->hw_type); 1194 return ENOTSUP; 1195 } 1196 return 0; 1197 } 1198 1199 static int 1200 iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 1201 { 1202 struct iwn_ops *ops 
= &sc->ops; 1203 1204 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1205 ops->load_firmware = iwn4965_load_firmware; 1206 ops->read_eeprom = iwn4965_read_eeprom; 1207 ops->post_alive = iwn4965_post_alive; 1208 ops->nic_config = iwn4965_nic_config; 1209 ops->update_sched = iwn4965_update_sched; 1210 ops->get_temperature = iwn4965_get_temperature; 1211 ops->get_rssi = iwn4965_get_rssi; 1212 ops->set_txpower = iwn4965_set_txpower; 1213 ops->init_gains = iwn4965_init_gains; 1214 ops->set_gains = iwn4965_set_gains; 1215 ops->add_node = iwn4965_add_node; 1216 ops->tx_done = iwn4965_tx_done; 1217 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 1218 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 1219 sc->ntxqs = IWN4965_NTXQUEUES; 1220 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; 1221 sc->ndmachnls = IWN4965_NDMACHNLS; 1222 sc->broadcast_id = IWN4965_ID_BROADCAST; 1223 sc->rxonsz = IWN4965_RXONSZ; 1224 sc->schedsz = IWN4965_SCHEDSZ; 1225 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 1226 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 1227 sc->fwsz = IWN4965_FWSZ; 1228 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 1229 sc->limits = &iwn4965_sensitivity_limits; 1230 sc->fwname = "iwn4965fw"; 1231 /* Override chains masks, ROM is known to be broken. 
*/ 1232 sc->txchainmask = IWN_ANT_AB; 1233 sc->rxchainmask = IWN_ANT_ABC; 1234 /* Enable normal btcoex */ 1235 sc->sc_flags |= IWN_FLAG_BTCOEX; 1236 1237 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); 1238 1239 return 0; 1240 } 1241 1242 static int 1243 iwn5000_attach(struct iwn_softc *sc, uint16_t pid) 1244 { 1245 struct iwn_ops *ops = &sc->ops; 1246 1247 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1248 1249 ops->load_firmware = iwn5000_load_firmware; 1250 ops->read_eeprom = iwn5000_read_eeprom; 1251 ops->post_alive = iwn5000_post_alive; 1252 ops->nic_config = iwn5000_nic_config; 1253 ops->update_sched = iwn5000_update_sched; 1254 ops->get_temperature = iwn5000_get_temperature; 1255 ops->get_rssi = iwn5000_get_rssi; 1256 ops->set_txpower = iwn5000_set_txpower; 1257 ops->init_gains = iwn5000_init_gains; 1258 ops->set_gains = iwn5000_set_gains; 1259 ops->add_node = iwn5000_add_node; 1260 ops->tx_done = iwn5000_tx_done; 1261 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 1262 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 1263 sc->ntxqs = IWN5000_NTXQUEUES; 1264 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; 1265 sc->ndmachnls = IWN5000_NDMACHNLS; 1266 sc->broadcast_id = IWN5000_ID_BROADCAST; 1267 sc->rxonsz = IWN5000_RXONSZ; 1268 sc->schedsz = IWN5000_SCHEDSZ; 1269 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 1270 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 1271 sc->fwsz = IWN5000_FWSZ; 1272 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 1273 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 1274 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 1275 1276 return 0; 1277 } 1278 1279 /* 1280 * Attach the interface to 802.11 radiotap. 
1281 */ 1282 static void 1283 iwn_radiotap_attach(struct iwn_softc *sc) 1284 { 1285 struct ifnet *ifp = sc->sc_ifp; 1286 struct ieee80211com *ic = ifp->if_l2com; 1287 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1288 ieee80211_radiotap_attach(ic, 1289 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 1290 IWN_TX_RADIOTAP_PRESENT, 1291 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 1292 IWN_RX_RADIOTAP_PRESENT); 1293 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1294 } 1295 1296 static void 1297 iwn_sysctlattach(struct iwn_softc *sc) 1298 { 1299 #ifdef IWN_DEBUG 1300 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 1301 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 1302 1303 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1304 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 1305 "control debugging printfs"); 1306 #endif 1307 } 1308 1309 static struct ieee80211vap * 1310 iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1311 enum ieee80211_opmode opmode, int flags, 1312 const uint8_t bssid[IEEE80211_ADDR_LEN], 1313 const uint8_t mac[IEEE80211_ADDR_LEN]) 1314 { 1315 struct iwn_vap *ivp; 1316 struct ieee80211vap *vap; 1317 uint8_t mac1[IEEE80211_ADDR_LEN]; 1318 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1319 1320 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 1321 return NULL; 1322 1323 IEEE80211_ADDR_COPY(mac1, mac); 1324 1325 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), 1326 M_80211_VAP, M_NOWAIT | M_ZERO); 1327 if (ivp == NULL) 1328 return NULL; 1329 vap = &ivp->iv_vap; 1330 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1); 1331 ivp->ctx = IWN_RXON_BSS_CTX; 1332 IEEE80211_ADDR_COPY(ivp->macaddr, mac1); 1333 vap->iv_bmissthreshold = 10; /* override default */ 1334 /* Override with driver methods. 
*/ 1335 ivp->iv_newstate = vap->iv_newstate; 1336 vap->iv_newstate = iwn_newstate; 1337 sc->ivap[IWN_RXON_BSS_CTX] = vap; 1338 1339 ieee80211_ratectl_init(vap); 1340 /* Complete setup. */ 1341 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); 1342 ic->ic_opmode = opmode; 1343 return vap; 1344 } 1345 1346 static void 1347 iwn_vap_delete(struct ieee80211vap *vap) 1348 { 1349 struct iwn_vap *ivp = IWN_VAP(vap); 1350 1351 ieee80211_ratectl_deinit(vap); 1352 ieee80211_vap_detach(vap); 1353 free(ivp, M_80211_VAP); 1354 } 1355 1356 static int 1357 iwn_detach(device_t dev) 1358 { 1359 struct iwn_softc *sc = device_get_softc(dev); 1360 struct ifnet *ifp = sc->sc_ifp; 1361 struct ieee80211com *ic; 1362 int qid; 1363 1364 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1365 1366 if (ifp != NULL) { 1367 ic = ifp->if_l2com; 1368 1369 ieee80211_draintask(ic, &sc->sc_reinit_task); 1370 ieee80211_draintask(ic, &sc->sc_radioon_task); 1371 ieee80211_draintask(ic, &sc->sc_radiooff_task); 1372 1373 iwn_stop(sc); 1374 1375 taskqueue_drain_all(sc->sc_tq); 1376 taskqueue_free(sc->sc_tq); 1377 1378 callout_drain(&sc->watchdog_to); 1379 callout_drain(&sc->calib_to); 1380 ieee80211_ifdetach(ic); 1381 } 1382 1383 /* Uninstall interrupt handler. */ 1384 if (sc->irq != NULL) { 1385 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 1386 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 1387 sc->irq); 1388 pci_release_msi(dev); 1389 } 1390 1391 /* Free DMA resources. 
*/ 1392 iwn_free_rx_ring(sc, &sc->rxq); 1393 for (qid = 0; qid < sc->ntxqs; qid++) 1394 iwn_free_tx_ring(sc, &sc->txq[qid]); 1395 iwn_free_sched(sc); 1396 iwn_free_kw(sc); 1397 if (sc->ict != NULL) 1398 iwn_free_ict(sc); 1399 iwn_free_fwmem(sc); 1400 1401 if (sc->mem != NULL) 1402 bus_release_resource(dev, SYS_RES_MEMORY, 1403 rman_get_rid(sc->mem), sc->mem); 1404 1405 if (ifp != NULL) 1406 if_free(ifp); 1407 1408 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__); 1409 IWN_LOCK_DESTROY(sc); 1410 return 0; 1411 } 1412 1413 static int 1414 iwn_shutdown(device_t dev) 1415 { 1416 struct iwn_softc *sc = device_get_softc(dev); 1417 1418 iwn_stop(sc); 1419 return 0; 1420 } 1421 1422 static int 1423 iwn_suspend(device_t dev) 1424 { 1425 struct iwn_softc *sc = device_get_softc(dev); 1426 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1427 1428 ieee80211_suspend_all(ic); 1429 return 0; 1430 } 1431 1432 static int 1433 iwn_resume(device_t dev) 1434 { 1435 struct iwn_softc *sc = device_get_softc(dev); 1436 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1437 1438 /* Clear device-specific "PCI retry timeout" register (41h). */ 1439 pci_write_config(dev, 0x41, 0, 1); 1440 1441 ieee80211_resume_all(ic); 1442 return 0; 1443 } 1444 1445 static int 1446 iwn_nic_lock(struct iwn_softc *sc) 1447 { 1448 int ntries; 1449 1450 /* Request exclusive access to NIC. */ 1451 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1452 1453 /* Spin until we actually get the lock. 
*/ 1454 for (ntries = 0; ntries < 1000; ntries++) { 1455 if ((IWN_READ(sc, IWN_GP_CNTRL) & 1456 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 1457 IWN_GP_CNTRL_MAC_ACCESS_ENA) 1458 return 0; 1459 DELAY(10); 1460 } 1461 return ETIMEDOUT; 1462 } 1463 1464 static __inline void 1465 iwn_nic_unlock(struct iwn_softc *sc) 1466 { 1467 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1468 } 1469 1470 static __inline uint32_t 1471 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 1472 { 1473 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 1474 IWN_BARRIER_READ_WRITE(sc); 1475 return IWN_READ(sc, IWN_PRPH_RDATA); 1476 } 1477 1478 static __inline void 1479 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1480 { 1481 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 1482 IWN_BARRIER_WRITE(sc); 1483 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 1484 } 1485 1486 static __inline void 1487 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1488 { 1489 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 1490 } 1491 1492 static __inline void 1493 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1494 { 1495 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 1496 } 1497 1498 static __inline void 1499 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 1500 const uint32_t *data, int count) 1501 { 1502 for (; count > 0; count--, data++, addr += 4) 1503 iwn_prph_write(sc, addr, *data); 1504 } 1505 1506 static __inline uint32_t 1507 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 1508 { 1509 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 1510 IWN_BARRIER_READ_WRITE(sc); 1511 return IWN_READ(sc, IWN_MEM_RDATA); 1512 } 1513 1514 static __inline void 1515 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1516 { 1517 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 1518 IWN_BARRIER_WRITE(sc); 1519 IWN_WRITE(sc, IWN_MEM_WDATA, data); 1520 } 1521 1522 static __inline void 1523 iwn_mem_write_2(struct 
iwn_softc *sc, uint32_t addr, uint16_t data) 1524 { 1525 uint32_t tmp; 1526 1527 tmp = iwn_mem_read(sc, addr & ~3); 1528 if (addr & 3) 1529 tmp = (tmp & 0x0000ffff) | data << 16; 1530 else 1531 tmp = (tmp & 0xffff0000) | data; 1532 iwn_mem_write(sc, addr & ~3, tmp); 1533 } 1534 1535 static __inline void 1536 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 1537 int count) 1538 { 1539 for (; count > 0; count--, addr += 4) 1540 *data++ = iwn_mem_read(sc, addr); 1541 } 1542 1543 static __inline void 1544 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1545 int count) 1546 { 1547 for (; count > 0; count--, addr += 4) 1548 iwn_mem_write(sc, addr, val); 1549 } 1550 1551 static int 1552 iwn_eeprom_lock(struct iwn_softc *sc) 1553 { 1554 int i, ntries; 1555 1556 for (i = 0; i < 100; i++) { 1557 /* Request exclusive access to EEPROM. */ 1558 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1559 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1560 1561 /* Spin until we actually get the lock. */ 1562 for (ntries = 0; ntries < 100; ntries++) { 1563 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1564 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1565 return 0; 1566 DELAY(10); 1567 } 1568 } 1569 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__); 1570 return ETIMEDOUT; 1571 } 1572 1573 static __inline void 1574 iwn_eeprom_unlock(struct iwn_softc *sc) 1575 { 1576 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1577 } 1578 1579 /* 1580 * Initialize access by host to One Time Programmable ROM. 1581 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1582 */ 1583 static int 1584 iwn_init_otprom(struct iwn_softc *sc) 1585 { 1586 uint16_t prev, base, next; 1587 int count, error; 1588 1589 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1590 1591 /* Wait for clock stabilization before accessing prph. 
*/ 1592 if ((error = iwn_clock_wait(sc)) != 0) 1593 return error; 1594 1595 if ((error = iwn_nic_lock(sc)) != 0) 1596 return error; 1597 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1598 DELAY(5); 1599 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1600 iwn_nic_unlock(sc); 1601 1602 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1603 if (sc->base_params->shadow_ram_support) { 1604 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1605 IWN_RESET_LINK_PWR_MGMT_DIS); 1606 } 1607 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1608 /* Clear ECC status. */ 1609 IWN_SETBITS(sc, IWN_OTP_GP, 1610 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1611 1612 /* 1613 * Find the block before last block (contains the EEPROM image) 1614 * for HW without OTP shadow RAM. 1615 */ 1616 if (! sc->base_params->shadow_ram_support) { 1617 /* Switch to absolute addressing mode. */ 1618 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1619 base = prev = 0; 1620 for (count = 0; count < sc->base_params->max_ll_items; 1621 count++) { 1622 error = iwn_read_prom_data(sc, base, &next, 2); 1623 if (error != 0) 1624 return error; 1625 if (next == 0) /* End of linked-list. */ 1626 break; 1627 prev = base; 1628 base = le16toh(next); 1629 } 1630 if (count == 0 || count == sc->base_params->max_ll_items) 1631 return EIO; 1632 /* Skip "next" word. 
*/ 1633 sc->prom_base = prev + 1; 1634 } 1635 1636 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1637 1638 return 0; 1639 } 1640 1641 static int 1642 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1643 { 1644 uint8_t *out = data; 1645 uint32_t val, tmp; 1646 int ntries; 1647 1648 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1649 1650 addr += sc->prom_base; 1651 for (; count > 0; count -= 2, addr++) { 1652 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1653 for (ntries = 0; ntries < 10; ntries++) { 1654 val = IWN_READ(sc, IWN_EEPROM); 1655 if (val & IWN_EEPROM_READ_VALID) 1656 break; 1657 DELAY(5); 1658 } 1659 if (ntries == 10) { 1660 device_printf(sc->sc_dev, 1661 "timeout reading ROM at 0x%x\n", addr); 1662 return ETIMEDOUT; 1663 } 1664 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1665 /* OTPROM, check for ECC errors. */ 1666 tmp = IWN_READ(sc, IWN_OTP_GP); 1667 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1668 device_printf(sc->sc_dev, 1669 "OTPROM ECC error at 0x%x\n", addr); 1670 return EIO; 1671 } 1672 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1673 /* Correctable ECC error, clear bit. 
*/ 1674 IWN_SETBITS(sc, IWN_OTP_GP, 1675 IWN_OTP_GP_ECC_CORR_STTS); 1676 } 1677 } 1678 *out++ = val >> 16; 1679 if (count > 1) 1680 *out++ = val >> 24; 1681 } 1682 1683 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1684 1685 return 0; 1686 } 1687 1688 static void 1689 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1690 { 1691 if (error != 0) 1692 return; 1693 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1694 *(bus_addr_t *)arg = segs[0].ds_addr; 1695 } 1696 1697 static int 1698 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1699 void **kvap, bus_size_t size, bus_size_t alignment) 1700 { 1701 int error; 1702 1703 dma->tag = NULL; 1704 dma->size = size; 1705 1706 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1707 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1708 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 1709 if (error != 0) 1710 goto fail; 1711 1712 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1713 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 1714 if (error != 0) 1715 goto fail; 1716 1717 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 1718 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 1719 if (error != 0) 1720 goto fail; 1721 1722 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 1723 1724 if (kvap != NULL) 1725 *kvap = dma->vaddr; 1726 1727 return 0; 1728 1729 fail: iwn_dma_contig_free(dma); 1730 return error; 1731 } 1732 1733 static void 1734 iwn_dma_contig_free(struct iwn_dma_info *dma) 1735 { 1736 if (dma->vaddr != NULL) { 1737 bus_dmamap_sync(dma->tag, dma->map, 1738 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1739 bus_dmamap_unload(dma->tag, dma->map); 1740 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 1741 dma->vaddr = NULL; 1742 } 1743 if (dma->tag != NULL) { 1744 bus_dma_tag_destroy(dma->tag); 1745 dma->tag = NULL; 1746 } 1747 } 1748 1749 static int 1750 
iwn_alloc_sched(struct iwn_softc *sc) 1751 { 1752 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1753 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, 1754 sc->schedsz, 1024); 1755 } 1756 1757 static void 1758 iwn_free_sched(struct iwn_softc *sc) 1759 { 1760 iwn_dma_contig_free(&sc->sched_dma); 1761 } 1762 1763 static int 1764 iwn_alloc_kw(struct iwn_softc *sc) 1765 { 1766 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1767 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); 1768 } 1769 1770 static void 1771 iwn_free_kw(struct iwn_softc *sc) 1772 { 1773 iwn_dma_contig_free(&sc->kw_dma); 1774 } 1775 1776 static int 1777 iwn_alloc_ict(struct iwn_softc *sc) 1778 { 1779 /* ICT table must be aligned on a 4KB boundary. */ 1780 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, 1781 IWN_ICT_SIZE, 4096); 1782 } 1783 1784 static void 1785 iwn_free_ict(struct iwn_softc *sc) 1786 { 1787 iwn_dma_contig_free(&sc->ict_dma); 1788 } 1789 1790 static int 1791 iwn_alloc_fwmem(struct iwn_softc *sc) 1792 { 1793 /* Must be aligned on a 16-byte boundary. */ 1794 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); 1795 } 1796 1797 static void 1798 iwn_free_fwmem(struct iwn_softc *sc) 1799 { 1800 iwn_dma_contig_free(&sc->fw_dma); 1801 } 1802 1803 static int 1804 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1805 { 1806 bus_size_t size; 1807 int i, error; 1808 1809 ring->cur = 0; 1810 1811 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1812 1813 /* Allocate RX descriptors (256-byte aligned). */ 1814 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1815 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1816 size, 256); 1817 if (error != 0) { 1818 device_printf(sc->sc_dev, 1819 "%s: could not allocate RX ring DMA memory, error %d\n", 1820 __func__, error); 1821 goto fail; 1822 } 1823 1824 /* Allocate RX status area (16-byte aligned). 
*/ 1825 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1826 sizeof (struct iwn_rx_status), 16); 1827 if (error != 0) { 1828 device_printf(sc->sc_dev, 1829 "%s: could not allocate RX status DMA memory, error %d\n", 1830 __func__, error); 1831 goto fail; 1832 } 1833 1834 /* Create RX buffer DMA tag. */ 1835 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1836 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1837 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL, 1838 &ring->data_dmat); 1839 if (error != 0) { 1840 device_printf(sc->sc_dev, 1841 "%s: could not create RX buf DMA tag, error %d\n", 1842 __func__, error); 1843 goto fail; 1844 } 1845 1846 /* 1847 * Allocate and map RX buffers. 1848 */ 1849 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1850 struct iwn_rx_data *data = &ring->data[i]; 1851 bus_addr_t paddr; 1852 1853 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1854 if (error != 0) { 1855 device_printf(sc->sc_dev, 1856 "%s: could not create RX buf DMA map, error %d\n", 1857 __func__, error); 1858 goto fail; 1859 } 1860 1861 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1862 IWN_RBUF_SIZE); 1863 if (data->m == NULL) { 1864 device_printf(sc->sc_dev, 1865 "%s: could not allocate RX mbuf\n", __func__); 1866 error = ENOBUFS; 1867 goto fail; 1868 } 1869 1870 error = bus_dmamap_load(ring->data_dmat, data->map, 1871 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1872 &paddr, BUS_DMA_NOWAIT); 1873 if (error != 0 && error != EFBIG) { 1874 device_printf(sc->sc_dev, 1875 "%s: can't not map mbuf, error %d\n", __func__, 1876 error); 1877 goto fail; 1878 } 1879 1880 /* Set physical address of RX buffer (256-byte aligned). 
*/ 1881 ring->desc[i] = htole32(paddr >> 8); 1882 } 1883 1884 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1885 BUS_DMASYNC_PREWRITE); 1886 1887 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1888 1889 return 0; 1890 1891 fail: iwn_free_rx_ring(sc, ring); 1892 1893 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1894 1895 return error; 1896 } 1897 1898 static void 1899 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1900 { 1901 int ntries; 1902 1903 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1904 1905 if (iwn_nic_lock(sc) == 0) { 1906 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1907 for (ntries = 0; ntries < 1000; ntries++) { 1908 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1909 IWN_FH_RX_STATUS_IDLE) 1910 break; 1911 DELAY(10); 1912 } 1913 iwn_nic_unlock(sc); 1914 } 1915 ring->cur = 0; 1916 sc->last_rx_valid = 0; 1917 } 1918 1919 static void 1920 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1921 { 1922 int i; 1923 1924 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1925 1926 iwn_dma_contig_free(&ring->desc_dma); 1927 iwn_dma_contig_free(&ring->stat_dma); 1928 1929 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1930 struct iwn_rx_data *data = &ring->data[i]; 1931 1932 if (data->m != NULL) { 1933 bus_dmamap_sync(ring->data_dmat, data->map, 1934 BUS_DMASYNC_POSTREAD); 1935 bus_dmamap_unload(ring->data_dmat, data->map); 1936 m_freem(data->m); 1937 data->m = NULL; 1938 } 1939 if (data->map != NULL) 1940 bus_dmamap_destroy(ring->data_dmat, data->map); 1941 } 1942 if (ring->data_dmat != NULL) { 1943 bus_dma_tag_destroy(ring->data_dmat); 1944 ring->data_dmat = NULL; 1945 } 1946 } 1947 1948 static int 1949 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1950 { 1951 bus_addr_t paddr; 1952 bus_size_t size; 1953 int i, error; 1954 1955 ring->qid = qid; 1956 ring->queued = 0; 1957 ring->cur = 0; 1958 1959 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1960 1961 /* 
	/* Allocate TX descriptors (256-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate TX command blocks, one per descriptor slot. */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
	    size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX cmd DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Tag for per-frame mbuf mappings (up to IWN_MAX_SCATTER-1 segs). */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create TX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Precompute the bus address of each slot's command block and of
	 * its scratch area (12 bytes into the command, per firmware layout
	 * — NOTE(review): offset 12 presumably matches struct iwn_tx_cmd;
	 * confirm against if_iwnreg.h).
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create TX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
	return error;
}

/*
 * Drop all queued frames on a TX ring and reset its indices so it can
 * be reused without reallocating DMA resources.
 */
static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

/*
 * Release every DMA resource attached to a TX ring: descriptor and
 * command areas, per-slot mbufs and maps, and the data DMA tag.
 */
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Reinitialize the ICT (Interrupt Coalescing Table) and switch the
 * driver to ICT interrupt delivery (5000 series and later).
 */
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver.
	 */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Read adapter identity (MAC address, SKU, RF config) from the EEPROM
 * or OTPROM, then hand off to the chip-specific reader.  The adapter is
 * temporarily powered on for the duration of the access.
 */
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct iwn_ops *ops = &sc->ops;
	uint16_t val;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		/*
		 * NOTE(review): this and the OTPROM failure path below
		 * return without iwn_apm_stop()/iwn_eeprom_unlock(), so
		 * the adapter may be left powered (and the ROM locked) on
		 * error — confirm whether callers compensate.
		 */
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/*
 * 4965-specific EEPROM reader: regulatory domain, channel lists,
 * band TX power limits and per-group power calibration samples.
 */
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group.
	 */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples. */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS - 1; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

#ifdef IWN_DEBUG
/*
 * Debug helper: dump the calibration samples of TX power group 'i'
 * (both boundary channels, every chain and sample).
 */
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	printf("===band %d===\n", i);
	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	printf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	printf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

/*
 * 5000-and-later EEPROM reader: regulatory domain and channels are
 * located through an indirection base read from IWN5000_EEPROM_REG,
 * followed by calibration header, temperature and crystal data.
 */
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = base + sc->base_params->regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->base_params->enhanced_TX_power)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		/*
		 * NOTE(review): temp_high is read from the TEMP offset and
		 * eeprom_temp from the VOLT offset here, the reverse of the
		 * 5150 branch below — verify against Intel EEPROM layout.
		 */
		sc->eeprom_temp_high=le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		sc->eeprom_temp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off);
	} else {
		/* Read crystal calibration.
		 */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal));
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return nflags;
}

/*
 * Populate ic_channels with the valid 20MHz channels of EEPROM band
 * 'n'.  Band 0 is 2GHz (B and G entries are added per channel, plus an
 * HT20 clone when 11n is available); other bands are 5GHz (A).
 */
static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		c = &ic->ic_channels[ic->ic_nchans++];
		c->ic_ieee = chan;
		c->ic_maxregpower = channels[i].maxpwr;
		c->ic_maxpower = 2*c->ic_maxregpower;

		if (n == 0) {	/* 2GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
			/* G =>'s B is supported */
			c->ic_flags = IEEE80211_CHAN_B | nflags;
			/* Clone the B entry for G. */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags = IEEE80211_CHAN_G | nflags;
		} else {	/* 5GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
			c->ic_flags = IEEE80211_CHAN_A | nflags;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n", chan,
		    channels[i].flags, channels[i].maxpwr);

		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
			/* add HT20, HT40 added separately */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags |= IEEE80211_CHAN_HT20;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Populate ic_channels with HT40 channel pairs from EEPROM band 'n'.
 * Band 5 is 2GHz (G), band 6 is 5GHz (A).  Requires that the 20MHz
 * entries (center and extension) were already added.
 */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c, *cent, *extc;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);

	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
		return;
	}

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		/*
		 * Each entry defines an HT40 channel pair; find the
		 * center channel, then the extension channel above.
		 */
		cent = ieee80211_find_channel_byieee(ic, chan,
		    (n == 5 ?
		    IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (cent == NULL) {	/* XXX shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s: no entry for channel %d\n", __func__, chan);
			continue;
		}
		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (extc == NULL) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: skip chan %d, extension channel not found\n",
			    __func__, chan);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
		    chan, channels[i].flags, channels[i].maxpwr);

		/* Clone center channel as HT40U... */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = cent[0];
		c->ic_extieee = extc->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
		/* ...and extension channel as HT40D. */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = extc[0];
		c->ic_extieee = cent->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Read one EEPROM channel band at 'addr' and dispatch to the 20MHz
 * (bands 0-4) or HT40 (bands 5-6) channel-list builder, keeping the
 * net80211 channel array sorted.
 */
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));

	if (n < 5)
		iwn_read_eeprom_band(sc, n);
	else
		iwn_read_eeprom_ht40(sc, n);
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}

/*
 * Map a net80211 channel back to its EEPROM entry, searching the HT40
 * bands (5/6) for HT40 channels and the 20MHz bands (0-4) otherwise.
 * Returns NULL when the channel has no EEPROM counterpart.
 */
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	int band, chan, i, j;

	if (IEEE80211_IS_CHAN_HT40(c)) {
		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
		/* HT40D entries are keyed by their extension channel. */
		if (IEEE80211_IS_CHAN_HT40D(c))
			chan = c->ic_extieee;
		else
			chan = c->ic_ieee;
		for (i = 0; i < iwn_bands[band].nchan; i++) {
			if (iwn_bands[band].chan[i] == chan)
				return &sc->eeprom_channels[band][i];
		}
	} else {
		for (j = 0; j < 5; j++) {
			for (i = 0; i < iwn_bands[j].nchan; i++) {
				if (iwn_bands[j].chan[i] == c->ic_ieee)
					return &sc->eeprom_channels[j][i];
			}
		}
	}
	return NULL;
}

/*
 * Enforce flags read from EEPROM.
 */
static int
iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
    int nchan, struct ieee80211_channel chans[])
{
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		struct iwn_eeprom_chan *channel;

		channel = iwn_find_eeprom_channel(sc, c);
		if (channel == NULL) {
			if_printf(ic->ic_ifp,
			    "%s: invalid channel %u freq %u/0x%x\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		c->ic_flags |= iwn_eeprom_channel_flags(channel);
	}

	return 0;
}

/*
 * Apply the enhanced TX power table (6000 Series) from the EEPROM to
 * matching net80211 channels, picking the highest limit among the
 * enabled TX chains and MIMO modes.
 */
static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_channel *c;
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i, j;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	for (i = 0; i < nitems(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries.
			 */

		/* Highest limit among enabled chains / MIMO modes. */
		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);

		for (j = 0; j < ic->ic_nchans; j++) {
			c = &ic->ic_channels[j];
			/* Match band (5GHz/OFDM/CCK) and HT40-ness. */
			if ((flags & IWN_ENHINFO_5GHZ)) {
				if (!IEEE80211_IS_CHAN_A(c))
					continue;
			} else if ((flags & IWN_ENHINFO_OFDM)) {
				if (!IEEE80211_IS_CHAN_G(c))
					continue;
			} else if (!IEEE80211_IS_CHAN_B(c))
				continue;
			if ((flags & IWN_ENHINFO_HT40)) {
				if (!IEEE80211_IS_CHAN_HT40(c))
					continue;
			} else {
				if (IEEE80211_IS_CHAN_HT40(c))
					continue;
			}
			/* chan == 0 means "applies to the whole band". */
			if (enhinfo[i].chan != 0 &&
			    enhinfo[i].chan != c->ic_ieee)
				continue;

			DPRINTF(sc, IWN_DEBUG_RESET,
			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
			    c->ic_flags, maxpwr / 2);
			/* EEPROM limits are in half-dBm units. */
			c->ic_maxregpower = maxpwr / 2;
			c->ic_maxpower = maxpwr;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/* net80211 node allocator: zeroed driver-private iwn_node. */
static struct ieee80211_node *
iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
}

/*
 * Map a legacy 802.11 rate (in 500kb/s units) to the hardware PLCP
 * signal value: OFDM rates map to 4-bit PLCP signals, CCK rates to
 * their rate in 100kb/s units.  Returns 0 for unknown rates.
 */
static __inline int
rate2plcp(int rate)
{
	switch (rate & 0xff) {
	case 12:	return 0xd;
	case 18:	return 0xf;
	case 24:	return 0x5;
	case 36:	return 0x7;
	case 48:	return 0x9;
	case 72:	return 0xb;
	case 96:	return 0x1;
	case 108:	return 0x3;
	case 2:		return 10;
	case 4:		return 20;
	case 11:	return 55;
	case 22:	return 110;
	}
	return 0;
}

/*
 * Calculate the required PLCP value from the given rate,
 * to the given node.
 *
 * This will take the node configuration (eg 11n, rate table
 * setup, etc) into consideration.
 */
static uint32_t
iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct ieee80211com *ic = ni->ni_ic;
	uint8_t txant1, txant2;
	uint32_t plcp = 0;
	int ridx;

	/* Use the first valid TX antenna. */
	txant1 = IWN_LSB(sc->txchainmask);
	txant2 = IWN_LSB(sc->txchainmask & ~txant1);

	/*
	 * If it's an MCS rate, let's set the plcp correctly
	 * and set the relevant flags based on the node config.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		/*
		 * Set the initial PLCP value to be between 0->31 for
		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
		 * flag.
		 */
		plcp = RV(rate) | IWN_RFLAG_MCS;

		/*
		 * XXX the following should only occur if both
		 * the local configuration _and_ the remote node
		 * advertise these capabilities.  Thus this code
		 * may need fixing!
		 */

		/*
		 * Set the channel width and guard interval.
		 */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			plcp |= IWN_RFLAG_HT40;
			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
				plcp |= IWN_RFLAG_SGI;
		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			plcp |= IWN_RFLAG_SGI;
		}

		/*
		 * If it's a two stream rate, enable TX on both
		 * antennas.
		 *
		 * XXX three stream rates?
		 */
		if (rate > 0x87)
			plcp |= IWN_RFLAG_ANT(txant1 | txant2);
		else
			plcp |= IWN_RFLAG_ANT(txant1);
	} else {
		/*
		 * Set the initial PLCP - fine for both
		 * OFDM and CCK rates.
		 */
		plcp = rate2plcp(rate);

		/* Set CCK flag if it's CCK */

		/* XXX It would be nice to have a method
		 * to map the ridx -> phy table entry
		 * so we could just query that, rather than
		 * this hack to check against IWN_RIDX_OFDM6.
		 */
		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
		    rate & IEEE80211_RATE_VAL);
		if (ridx < IWN_RIDX_OFDM6 &&
		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
			plcp |= IWN_RFLAG_CCK;

		/* Set antenna configuration */
		plcp |= IWN_RFLAG_ANT(txant1);
	}

	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
	    __func__,
	    rate,
	    plcp);

	/* PLCP is handed to the firmware little-endian. */
	return (htole32(plcp));
#undef RV
}

static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
	/* Doesn't do anything at the moment */
}

/* ifmedia change callback; a fixed-rate change needs no reset. */
static int
iwn_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ?
	    0 : error);
}

/*
 * net80211 state-machine hook.  Drops the 802.11 lock and takes the
 * driver lock while programming the firmware, then chains to the
 * saved net80211 newstate handler.
 */
static int
iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwn_vap *ivp = IWN_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);

	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
	callout_stop(&sc->calib_to);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	switch (nstate) {
	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		if (vap->iv_state == IEEE80211_S_AUTH)
			break;

		/*
		 * !AUTH -> AUTH transition requires state reset to handle
		 * reassociations correctly.
		 */
		sc->rxon->associd = 0;
		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
		sc->calib.state = IWN_CALIB_STATE_INIT;

		if ((error = iwn_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state\n", __func__);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition; Just restart the timers.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			sc->calib_cnt = 0;
			break;
		}

		/*
		 * !RUN -> RUN requires setting the association id
		 * which is done with a firmware cmd.  We also defer
		 * starting the timers until that work is done.
		 */
		if ((error = iwn_run(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to run state\n", __func__);
		}
		break;

	case IEEE80211_S_INIT:
		sc->calib.state = IWN_CALIB_STATE_INIT;
		break;

	default:
		break;
	}
	IWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);
	if (error != 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ivp->iv_newstate(vap, nstate, arg);
}

/*
 * Periodic (500ms) calibration callout; every 120 ticks (~60s) it asks
 * the firmware for statistics, which drives TX power calibration.
 */
static void
iwn_calib_timeout(void *arg)
{
	struct iwn_softc *sc = arg;

	IWN_LOCK_ASSERT(sc);

	/* Force automatic TX power calibration every 60 secs. */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
		    "sending request for statistics");
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);
}

/*
 * Process an RX_PHY firmware notification.  This is usually immediately
 * followed by an MPDU_RX_DONE notification.
 */
static void
iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
	sc->last_rx_valid = 1;
}

/*
 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
 */
static void
iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_rx_ring *ring = &sc->rxq;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct mbuf *m, *m1;
	struct iwn_rx_stat *stat;
	caddr_t head;
	bus_addr_t paddr;
	uint32_t flags;
	int error, len, rssi, nf;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (desc->type == IWN_MPDU_RX_DONE) {
		/* Check for prior RX_PHY notification. */
		if (!sc->last_rx_valid) {
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: missing RX_PHY\n", __func__);
			return;
		}
		stat = &sc->last_rx_stat;
	} else
		stat = (struct iwn_rx_stat *)(desc + 1);

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
		device_printf(sc->sc_dev,
		    "%s: invalid RX statistic header, len %d\n", __func__,
		    stat->cfg_phy_len);
		return;
	}
	/* Locate the frame payload and its length within the buffer. */
	if (desc->type == IWN_MPDU_RX_DONE) {
		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
		head = (caddr_t)(mpdu + 1);
		len = le16toh(mpdu->len);
	} else {
		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
		len = le16toh(stat->len);
	}

	/* Status word follows the frame body. */
	flags = le32toh(*(uint32_t *)(head + len));

	/* Discard frames with a bad FCS early. */
	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
		    __func__, flags);
		ifp->if_ierrors++;
		return;
	}
	/* Discard frames that are too short. */
	if (len < sizeof (*wh)) {
		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
		    __func__, len);
		ifp->if_ierrors++;
		return;
	}

	/* Get a replacement cluster before giving up the current one. */
	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
	if (m1 == NULL) {
		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
		    __func__);
		ifp->if_ierrors++;
		return;
	}
	bus_dmamap_unload(ring->data_dmat, data->map);

	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		device_printf(sc->sc_dev,
		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
		m_freem(m1);

		/* Try to reload the old mbuf. */
		error = bus_dmamap_load(ring->data_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
		    &paddr, BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG) {
			panic("%s: could not load old RX mbuf", __func__);
		}
		/* Physical address may have changed. */
		ring->desc[ring->cur] = htole32(paddr >> 8);
		/*
		 * NOTE(review): this sync passes ring->data_dmat with
		 * ring->desc_dma.map, while the success path below uses
		 * ring->desc_dma.tag — looks like a tag/map mismatch;
		 * verify against bus_dma(9).
		 */
		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
		    BUS_DMASYNC_PREWRITE);
		ifp->if_ierrors++;
		return;
	}

	m = data->m;
	data->m = m1;
	/* Update RX descriptor. */
	ring->desc[ring->cur] = htole32(paddr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Finalize mbuf. */
	m->m_pkthdr.rcvif = ifp;
	m->m_data = head;
	m->m_pkthdr.len = m->m_len = len;

	/* Grab a reference to the source node. */
	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	/* Use the calibrated noise floor only when it is meaningful. */
	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;

	rssi = ops->get_rssi(sc, stat);

	if (ieee80211_radiotap_active(ic)) {
		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)nf;
		tap->wr_tsft = stat->tstamp;
		/* Map hardware PLCP signal back to a 500kb/s rate code. */
		switch (stat->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}
	}

	/* Drop the driver lock across the net80211 input path. */
	IWN_UNLOCK(sc);

	/* Send the frame to the 802.11 layer. */
	if (ni != NULL) {
		if (ni->ni_flags & IEEE80211_NODE_HT)
			m->m_flags |= M_AMPDU;
		(void)ieee80211_input(ni, m, rssi - nf, nf);
		/* Node is no longer needed. */
		ieee80211_free_node(ni);
	} else
		(void)ieee80211_input_all(ic, m, rssi - nf, nf);

	IWN_LOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/* Process an incoming Compressed BlockAck.
 */
static void
iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
	struct iwn_tx_ring *txq;
	struct iwn_tx_data *txdata;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *m;
	uint64_t bitmap;
	uint16_t ssn;
	uint8_t tid;
	int ackfailcnt = 0, i, lastidx, qid, *res, shift;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	qid = le16toh(ba->qid);
	/*
	 * NOTE(review): 'qid' is byte-swapped above but the three array
	 * lookups below index with the raw ba->qid — inconsistent on a
	 * big-endian host; confirm the intended field width/order.
	 */
	txq = &sc->txq[ba->qid];
	tap = sc->qid2tap[ba->qid];
	tid = tap->txa_tid;
	wn = (void *)tap->txa_ni;

	/* If the session is winding down, remember where to stop it. */
	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	/* Complete every frame up to the BA starting sequence index. */
	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
		txdata = &txq->data[txq->read];

		/* Unmap and free mbuf. */
		bus_dmamap_sync(txq->data_dmat, txdata->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->data_dmat, txdata->map);
		m = txdata->m, txdata->m = NULL;
		ni = txdata->ni, txdata->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));

		ieee80211_tx_complete(ni, m, 1);

		txq->queued--;
		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
	}

	/* Queue drained and session stopping: tear down the AMPDU TX. */
	if (txq->queued == 0 && res != NULL) {
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		free(res, M_DEVBUF);
		return;
	}

	if (wn->agg[tid].bitmap == 0)
		return;

	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
	if (shift < 0)
		shift += 0x100;

	if (wn->agg[tid].nframes > (64 - shift))
		return;

	/* Feed per-frame ACK/NACK results to the rate control module. */
	ni = tap->txa_ni;
	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
	for (i = 0; bitmap; i++) {
		if ((bitmap & 1) == 0) {
			ifp->if_oerrors++;
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
		} else {
			ifp->if_opackets++;
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
		}
		bitmap >>= 1;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/*
 * Process a CALIBRATION_RESULT notification sent by the initialization
 * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
 */
static void
iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
	int len, idx = -1;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Runtime firmware should not send such a notification.
 */
	if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
		    __func__);
		return;
	}
	/*
	 * Payload length: low 14 bits of the RX descriptor length minus
	 * the 4-byte status word.
	 * NOTE(review): if the firmware reports len < 4 this goes negative
	 * and is later passed to malloc() as a huge size_t — the firmware
	 * is trusted here; confirm whether a sanity check is warranted.
	 */
	len = (le32toh(desc->len) & 0x3fff) - 4;
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/*
	 * Map the calibration code to a fixed slot in sc->calibcmd[],
	 * but only if this chip actually needs that calibration.
	 */
	switch (calib->code) {
	case IWN5000_PHY_CALIB_DC:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
			idx = 0;
		break;
	case IWN5000_PHY_CALIB_LO:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
			idx = 1;
		break;
	case IWN5000_PHY_CALIB_TX_IQ:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
			idx = 2;
		break;
	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
			idx = 3;
		break;
	case IWN5000_PHY_CALIB_BASE_BAND:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
			idx = 4;
		break;
	}
	if (idx == -1)	/* Ignore other results. */
		return;

	/* Save calibration result; replace any earlier copy for this slot. */
	if (sc->calibcmd[idx].buf != NULL)
		free(sc->calibcmd[idx].buf, M_DEVBUF);
	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
	if (sc->calibcmd[idx].buf == NULL) {
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "not enough memory for calibration result %d\n",
		    calib->code);
		return;
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len);
	sc->calibcmd[idx].len = len;
	memcpy(sc->calibcmd[idx].buf, calib, len);
}

/*
 * Normalize a firmware statistics notification into sc->last_stat.
 *
 * Bluetooth-capable firmware sends a larger iwn_stats_bt layout; this
 * routine copies the fields shared with the legacy iwn_stats layout so
 * the rest of the driver only ever deals with struct iwn_stats.
 * 'calib' is currently unused here; 'len' is the notification length
 * including the 4-byte status word.
 */
static void
iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib,
    struct iwn_stats *stats, int len)
{
	struct iwn_stats_bt *stats_bt;
	struct iwn_stats *lstats;

	/*
	 * First - check whether the length is the bluetooth or normal.
	 *
	 * If it's normal - just copy it and bump out.
	 * Otherwise we have to convert things.
	 */

	if (len == sizeof(struct iwn_stats) + 4) {
		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
		sc->last_stat_valid = 1;
		return;
	}

	/*
	 * If it's not the bluetooth size - log, then just copy.
	 */
	if (len != sizeof(struct iwn_stats_bt) + 4) {
		DPRINTF(sc, IWN_DEBUG_STATS,
		    "%s: size of rx statistics (%d) not an expected size!\n",
		    __func__,
		    len);
		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
		sc->last_stat_valid = 1;
		return;
	}

	/*
	 * Ok. Time to copy the bluetooth layout field-by-field into the
	 * legacy layout.
	 */
	stats_bt = (struct iwn_stats_bt *) stats;
	lstats = &sc->last_stat;

	/* flags */
	lstats->flags = stats_bt->flags;
	/* rx_bt */
	memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm,
	    sizeof(struct iwn_rx_phy_stats));
	memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck,
	    sizeof(struct iwn_rx_phy_stats));
	memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common,
	    sizeof(struct iwn_rx_general_stats));
	memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht,
	    sizeof(struct iwn_rx_ht_phy_stats));
	/* tx */
	memcpy(&lstats->tx, &stats_bt->tx,
	    sizeof(struct iwn_tx_stats));
	/* general */
	memcpy(&lstats->general, &stats_bt->general,
	    sizeof(struct iwn_general_stats));

	/* XXX TODO: Squirrel away the extra bluetooth stats somewhere */
	sc->last_stat_valid = 1;
}

/*
 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
 * The latter is sent by the firmware after each received beacon.
3252 */ 3253 static void 3254 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3255 struct iwn_rx_data *data) 3256 { 3257 struct iwn_ops *ops = &sc->ops; 3258 struct ifnet *ifp = sc->sc_ifp; 3259 struct ieee80211com *ic = ifp->if_l2com; 3260 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3261 struct iwn_calib_state *calib = &sc->calib; 3262 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 3263 struct iwn_stats *lstats; 3264 int temp; 3265 3266 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3267 3268 /* Ignore statistics received during a scan. */ 3269 if (vap->iv_state != IEEE80211_S_RUN || 3270 (ic->ic_flags & IEEE80211_F_SCAN)){ 3271 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n", 3272 __func__); 3273 return; 3274 } 3275 3276 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3277 3278 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS, 3279 "%s: received statistics, cmd %d, len %d\n", 3280 __func__, desc->type, le16toh(desc->len)); 3281 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 3282 3283 /* 3284 * Collect/track general statistics for reporting. 3285 * 3286 * This takes care of ensuring that the bluetooth sized message 3287 * will be correctly converted to the legacy sized message. 3288 */ 3289 iwn_stats_update(sc, calib, stats, le16toh(desc->len)); 3290 3291 /* 3292 * And now, let's take a reference of it to use! 3293 */ 3294 lstats = &sc->last_stat; 3295 3296 /* Test if temperature has changed. */ 3297 if (lstats->general.temp != sc->rawtemp) { 3298 /* Convert "raw" temperature to degC. */ 3299 sc->rawtemp = stats->general.temp; 3300 temp = ops->get_temperature(sc); 3301 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 3302 __func__, temp); 3303 3304 /* Update TX power if need be (4965AGN only). 
*/ 3305 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3306 iwn4965_power_calibration(sc, temp); 3307 } 3308 3309 if (desc->type != IWN_BEACON_STATISTICS) 3310 return; /* Reply to a statistics request. */ 3311 3312 sc->noise = iwn_get_noise(&lstats->rx.general); 3313 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 3314 3315 /* Test that RSSI and noise are present in stats report. */ 3316 if (le32toh(lstats->rx.general.flags) != 1) { 3317 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 3318 "received statistics without RSSI"); 3319 return; 3320 } 3321 3322 if (calib->state == IWN_CALIB_STATE_ASSOC) 3323 iwn_collect_noise(sc, &lstats->rx.general); 3324 else if (calib->state == IWN_CALIB_STATE_RUN) { 3325 iwn_tune_sensitivity(sc, &lstats->rx); 3326 /* 3327 * XXX TODO: Only run the RX recovery if we're associated! 3328 */ 3329 iwn_check_rx_recovery(sc, lstats); 3330 iwn_save_stats_counters(sc, lstats); 3331 } 3332 3333 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3334 } 3335 3336 /* 3337 * Save the relevant statistic counters for the next calibration 3338 * pass. 3339 */ 3340 static void 3341 iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs) 3342 { 3343 struct iwn_calib_state *calib = &sc->calib; 3344 3345 /* Save counters values for next call. */ 3346 calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp); 3347 calib->fa_cck = le32toh(rs->rx.cck.fa); 3348 calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp); 3349 calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp); 3350 calib->fa_ofdm = le32toh(rs->rx.ofdm.fa); 3351 3352 /* Last time we received these tick values */ 3353 sc->last_calib_ticks = ticks; 3354 } 3355 3356 /* 3357 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 3358 * and 5000 adapters have different incompatible TX status formats. 
3359 */ 3360 static void 3361 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3362 struct iwn_rx_data *data) 3363 { 3364 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 3365 struct iwn_tx_ring *ring; 3366 int qid; 3367 3368 qid = desc->qid & 0xf; 3369 ring = &sc->txq[qid]; 3370 3371 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3372 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3373 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3374 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3375 le32toh(stat->status)); 3376 3377 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3378 if (qid >= sc->firstaggqueue) { 3379 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3380 &stat->status); 3381 } else { 3382 iwn_tx_done(sc, desc, stat->ackfailcnt, 3383 le32toh(stat->status) & 0xff); 3384 } 3385 } 3386 3387 static void 3388 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3389 struct iwn_rx_data *data) 3390 { 3391 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 3392 struct iwn_tx_ring *ring; 3393 int qid; 3394 3395 qid = desc->qid & 0xf; 3396 ring = &sc->txq[qid]; 3397 3398 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3399 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3400 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3401 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3402 le32toh(stat->status)); 3403 3404 #ifdef notyet 3405 /* Reset TX scheduler slot. */ 3406 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 3407 #endif 3408 3409 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3410 if (qid >= sc->firstaggqueue) { 3411 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3412 &stat->status); 3413 } else { 3414 iwn_tx_done(sc, desc, stat->ackfailcnt, 3415 le16toh(stat->status) & 0xff); 3416 } 3417 } 3418 3419 /* 3420 * Adapter-independent backend for TX_DONE firmware notifications. 
3421 */ 3422 static void 3423 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 3424 uint8_t status) 3425 { 3426 struct ifnet *ifp = sc->sc_ifp; 3427 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 3428 struct iwn_tx_data *data = &ring->data[desc->idx]; 3429 struct mbuf *m; 3430 struct ieee80211_node *ni; 3431 struct ieee80211vap *vap; 3432 3433 KASSERT(data->ni != NULL, ("no node")); 3434 3435 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3436 3437 /* Unmap and free mbuf. */ 3438 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 3439 bus_dmamap_unload(ring->data_dmat, data->map); 3440 m = data->m, data->m = NULL; 3441 ni = data->ni, data->ni = NULL; 3442 vap = ni->ni_vap; 3443 3444 /* 3445 * Update rate control statistics for the node. 3446 */ 3447 if (status & IWN_TX_FAIL) { 3448 ifp->if_oerrors++; 3449 ieee80211_ratectl_tx_complete(vap, ni, 3450 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3451 } else { 3452 ifp->if_opackets++; 3453 ieee80211_ratectl_tx_complete(vap, ni, 3454 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3455 } 3456 3457 /* 3458 * Channels marked for "radar" require traffic to be received 3459 * to unlock before we can transmit. Until traffic is seen 3460 * any attempt to transmit is returned immediately with status 3461 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 3462 * happen on first authenticate after scanning. To workaround 3463 * this we ignore a failure of this sort in AUTH state so the 3464 * 802.11 layer will fall back to using a timeout to wait for 3465 * the AUTH reply. This allows the firmware time to see 3466 * traffic so a subsequent retry of AUTH succeeds. It's 3467 * unclear why the firmware does not maintain state for 3468 * channels recently visited as this would allow immediate 3469 * use of the channel after a scan (where we see traffic). 
3470 */ 3471 if (status == IWN_TX_FAIL_TX_LOCKED && 3472 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 3473 ieee80211_tx_complete(ni, m, 0); 3474 else 3475 ieee80211_tx_complete(ni, m, 3476 (status & IWN_TX_FAIL) != 0); 3477 3478 sc->sc_tx_timer = 0; 3479 if (--ring->queued < IWN_TX_RING_LOMARK) { 3480 sc->qfullmsk &= ~(1 << ring->qid); 3481 if (sc->qfullmsk == 0 && 3482 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3483 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3484 iwn_start_locked(ifp); 3485 } 3486 } 3487 3488 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3489 3490 } 3491 3492 /* 3493 * Process a "command done" firmware notification. This is where we wakeup 3494 * processes waiting for a synchronous command completion. 3495 */ 3496 static void 3497 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3498 { 3499 struct iwn_tx_ring *ring; 3500 struct iwn_tx_data *data; 3501 int cmd_queue_num; 3502 3503 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 3504 cmd_queue_num = IWN_PAN_CMD_QUEUE; 3505 else 3506 cmd_queue_num = IWN_CMD_QUEUE_NUM; 3507 3508 if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num) 3509 return; /* Not a command ack. */ 3510 3511 ring = &sc->txq[cmd_queue_num]; 3512 data = &ring->data[desc->idx]; 3513 3514 /* If the command was mapped in an mbuf, free it. 
 */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command's descriptor slot. */
	wakeup(&ring->desc[desc->idx]);
}

/*
 * Process an aggregation TX_DONE notification: 'stat' points at the
 * chip-specific status block; for nframes > 1 it is an array of 16-bit
 * per-frame (status, idx) pairs followed by a 32-bit word carrying the
 * final sequence number.  Builds the ACK bitmap used later when the
 * compressed BlockAck arrives, reaps completed slots, and finishes a
 * pending A-MPDU session teardown once the ring drains.
 */
static void
iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
    void *stat)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_tx_ring *ring = &sc->txq[qid];
	struct iwn_tx_data *data;
	struct mbuf *m;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;
	uint64_t bitmap;
	uint32_t *status = stat;
	uint16_t *aggstatus = stat;
	uint16_t ssn;
	uint8_t tid;
	int bit, i, lastidx, *res, seqno, shift, start;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (nframes == 1) {
		if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
#ifdef	NOT_YET
			printf("ieee80211_send_bar()\n");
#endif
			/*
			 * If we completely fail a transmit, make sure a
			 * notification is pushed up to the rate control
			 * layer.
			 */
			tap = sc->qid2tap[qid];
			tid = tap->txa_tid;
			wn = (void *)tap->txa_ni;
			ni = tap->txa_ni;
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_FAILURE, &nframes, NULL);
		}
	}

	/*
	 * Build the 64-bit bitmap of frames the firmware attempted;
	 * indices wrap modulo 0x100 (the ring size), hence the shifts.
	 */
	bitmap = 0;
	start = idx;
	for (i = 0; i < nframes; i++) {
		/* Skip frames the firmware did not transmit. */
		if (le16toh(aggstatus[i * 2]) & 0xc)
			continue;

		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
		bit = idx - start;
		shift = 0;
		if (bit >= 64) {
			shift = 0x100 - idx + start;
			bit = 0;
			start = idx;
		} else if (bit <= -64)
			bit = 0x100 - start + idx;
		else if (bit < 0) {
			shift = start - idx;
			start = idx;
			bit = 0;
		}
		bitmap = bitmap << shift;
		bitmap |= 1ULL << bit;
	}
	tap = sc->qid2tap[qid];
	tid = tap->txa_tid;
	wn = (void *)tap->txa_ni;
	wn->agg[tid].bitmap = bitmap;
	wn->agg[tid].startidx = start;
	wn->agg[tid].nframes = nframes;

	/* If the A-MPDU session is tearing down, remember how to stop it. */
	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	/* Reap all slots up to the firmware-reported sequence number. */
	seqno = le32toh(*(status + nframes)) & 0xfff;
	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
		data = &ring->data[ring->read];

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m = data->m, data->m = NULL;
		ni = data->ni, data->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));

		ieee80211_tx_complete(ni, m, 1);

		ring->queued--;
		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
	}

	/* Ring drained while session teardown pending: finish the stop. */
	if (ring->queued == 0 && res != NULL) {
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		free(res, M_DEVBUF);
		return;
	}

	/* Clear the watchdog and unthrottle the queue if it drained. */
	sc->sc_tx_timer = 0;
	if (ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			iwn_start_locked(ifp);
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process an INT_FH_RX or INT_SW_RX interrupt.
3647 */ 3648 static void 3649 iwn_notif_intr(struct iwn_softc *sc) 3650 { 3651 struct iwn_ops *ops = &sc->ops; 3652 struct ifnet *ifp = sc->sc_ifp; 3653 struct ieee80211com *ic = ifp->if_l2com; 3654 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3655 uint16_t hw; 3656 3657 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 3658 BUS_DMASYNC_POSTREAD); 3659 3660 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 3661 while (sc->rxq.cur != hw) { 3662 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 3663 struct iwn_rx_desc *desc; 3664 3665 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3666 BUS_DMASYNC_POSTREAD); 3667 desc = mtod(data->m, struct iwn_rx_desc *); 3668 3669 DPRINTF(sc, IWN_DEBUG_RECV, 3670 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 3671 __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags, 3672 desc->type, iwn_intr_str(desc->type), 3673 le16toh(desc->len)); 3674 3675 if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */ 3676 iwn_cmd_done(sc, desc); 3677 3678 switch (desc->type) { 3679 case IWN_RX_PHY: 3680 iwn_rx_phy(sc, desc, data); 3681 break; 3682 3683 case IWN_RX_DONE: /* 4965AGN only. */ 3684 case IWN_MPDU_RX_DONE: 3685 /* An 802.11 frame has been received. */ 3686 iwn_rx_done(sc, desc, data); 3687 break; 3688 3689 case IWN_RX_COMPRESSED_BA: 3690 /* A Compressed BlockAck has been received. */ 3691 iwn_rx_compressed_ba(sc, desc, data); 3692 break; 3693 3694 case IWN_TX_DONE: 3695 /* An 802.11 frame has been transmitted. 
*/ 3696 ops->tx_done(sc, desc, data); 3697 break; 3698 3699 case IWN_RX_STATISTICS: 3700 case IWN_BEACON_STATISTICS: 3701 iwn_rx_statistics(sc, desc, data); 3702 break; 3703 3704 case IWN_BEACON_MISSED: 3705 { 3706 struct iwn_beacon_missed *miss = 3707 (struct iwn_beacon_missed *)(desc + 1); 3708 int misses; 3709 3710 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3711 BUS_DMASYNC_POSTREAD); 3712 misses = le32toh(miss->consecutive); 3713 3714 DPRINTF(sc, IWN_DEBUG_STATE, 3715 "%s: beacons missed %d/%d\n", __func__, 3716 misses, le32toh(miss->total)); 3717 /* 3718 * If more than 5 consecutive beacons are missed, 3719 * reinitialize the sensitivity state machine. 3720 */ 3721 if (vap->iv_state == IEEE80211_S_RUN && 3722 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 3723 if (misses > 5) 3724 (void)iwn_init_sensitivity(sc); 3725 if (misses >= vap->iv_bmissthreshold) { 3726 IWN_UNLOCK(sc); 3727 ieee80211_beacon_miss(ic); 3728 IWN_LOCK(sc); 3729 } 3730 } 3731 break; 3732 } 3733 case IWN_UC_READY: 3734 { 3735 struct iwn_ucode_info *uc = 3736 (struct iwn_ucode_info *)(desc + 1); 3737 3738 /* The microcontroller is ready. */ 3739 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3740 BUS_DMASYNC_POSTREAD); 3741 DPRINTF(sc, IWN_DEBUG_RESET, 3742 "microcode alive notification version=%d.%d " 3743 "subtype=%x alive=%x\n", uc->major, uc->minor, 3744 uc->subtype, le32toh(uc->valid)); 3745 3746 if (le32toh(uc->valid) != 1) { 3747 device_printf(sc->sc_dev, 3748 "microcontroller initialization failed"); 3749 break; 3750 } 3751 if (uc->subtype == IWN_UCODE_INIT) { 3752 /* Save microcontroller report. */ 3753 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 3754 } 3755 /* Save the address of the error log in SRAM. */ 3756 sc->errptr = le32toh(uc->errptr); 3757 break; 3758 } 3759 case IWN_STATE_CHANGED: 3760 { 3761 /* 3762 * State change allows hardware switch change to be 3763 * noted. However, we handle this in iwn_intr as we 3764 * get both the enable/disble intr. 
3765 */ 3766 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3767 BUS_DMASYNC_POSTREAD); 3768 #ifdef IWN_DEBUG 3769 uint32_t *status = (uint32_t *)(desc + 1); 3770 DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE, 3771 "state changed to %x\n", 3772 le32toh(*status)); 3773 #endif 3774 break; 3775 } 3776 case IWN_START_SCAN: 3777 { 3778 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3779 BUS_DMASYNC_POSTREAD); 3780 #ifdef IWN_DEBUG 3781 struct iwn_start_scan *scan = 3782 (struct iwn_start_scan *)(desc + 1); 3783 DPRINTF(sc, IWN_DEBUG_ANY, 3784 "%s: scanning channel %d status %x\n", 3785 __func__, scan->chan, le32toh(scan->status)); 3786 #endif 3787 break; 3788 } 3789 case IWN_STOP_SCAN: 3790 { 3791 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3792 BUS_DMASYNC_POSTREAD); 3793 #ifdef IWN_DEBUG 3794 struct iwn_stop_scan *scan = 3795 (struct iwn_stop_scan *)(desc + 1); 3796 DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN, 3797 "scan finished nchan=%d status=%d chan=%d\n", 3798 scan->nchan, scan->status, scan->chan); 3799 #endif 3800 sc->sc_is_scanning = 0; 3801 IWN_UNLOCK(sc); 3802 ieee80211_scan_next(vap); 3803 IWN_LOCK(sc); 3804 break; 3805 } 3806 case IWN5000_CALIBRATION_RESULT: 3807 iwn5000_rx_calib_results(sc, desc, data); 3808 break; 3809 3810 case IWN5000_CALIBRATION_DONE: 3811 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 3812 wakeup(sc); 3813 break; 3814 } 3815 3816 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 3817 } 3818 3819 /* Tell the firmware what we have processed. */ 3820 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 3821 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 3822 } 3823 3824 /* 3825 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 3826 * from power-down sleep mode. 3827 */ 3828 static void 3829 iwn_wakeup_intr(struct iwn_softc *sc) 3830 { 3831 int qid; 3832 3833 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 3834 __func__); 3835 3836 /* Wakeup RX and TX rings. 
*/ 3837 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 3838 for (qid = 0; qid < sc->ntxqs; qid++) { 3839 struct iwn_tx_ring *ring = &sc->txq[qid]; 3840 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 3841 } 3842 } 3843 3844 static void 3845 iwn_rftoggle_intr(struct iwn_softc *sc) 3846 { 3847 struct ifnet *ifp = sc->sc_ifp; 3848 struct ieee80211com *ic = ifp->if_l2com; 3849 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 3850 3851 IWN_LOCK_ASSERT(sc); 3852 3853 device_printf(sc->sc_dev, "RF switch: radio %s\n", 3854 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 3855 if (tmp & IWN_GP_CNTRL_RFKILL) 3856 ieee80211_runtask(ic, &sc->sc_radioon_task); 3857 else 3858 ieee80211_runtask(ic, &sc->sc_radiooff_task); 3859 } 3860 3861 /* 3862 * Dump the error log of the firmware when a firmware panic occurs. Although 3863 * we can't debug the firmware because it is neither open source nor free, it 3864 * can help us to identify certain classes of problems. 3865 */ 3866 static void 3867 iwn_fatal_intr(struct iwn_softc *sc) 3868 { 3869 struct iwn_fw_dump dump; 3870 int i; 3871 3872 IWN_LOCK_ASSERT(sc); 3873 3874 /* Force a complete recalibration on next init. */ 3875 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 3876 3877 /* Check that the error log address is valid. */ 3878 if (sc->errptr < IWN_FW_DATA_BASE || 3879 sc->errptr + sizeof (dump) > 3880 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 3881 printf("%s: bad firmware error log address 0x%08x\n", __func__, 3882 sc->errptr); 3883 return; 3884 } 3885 if (iwn_nic_lock(sc) != 0) { 3886 printf("%s: could not read firmware error log\n", __func__); 3887 return; 3888 } 3889 /* Read firmware error log from SRAM. 
*/ 3890 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 3891 sizeof (dump) / sizeof (uint32_t)); 3892 iwn_nic_unlock(sc); 3893 3894 if (dump.valid == 0) { 3895 printf("%s: firmware error log is empty\n", __func__); 3896 return; 3897 } 3898 printf("firmware error log:\n"); 3899 printf(" error type = \"%s\" (0x%08X)\n", 3900 (dump.id < nitems(iwn_fw_errmsg)) ? 3901 iwn_fw_errmsg[dump.id] : "UNKNOWN", 3902 dump.id); 3903 printf(" program counter = 0x%08X\n", dump.pc); 3904 printf(" source line = 0x%08X\n", dump.src_line); 3905 printf(" error data = 0x%08X%08X\n", 3906 dump.error_data[0], dump.error_data[1]); 3907 printf(" branch link = 0x%08X%08X\n", 3908 dump.branch_link[0], dump.branch_link[1]); 3909 printf(" interrupt link = 0x%08X%08X\n", 3910 dump.interrupt_link[0], dump.interrupt_link[1]); 3911 printf(" time = %u\n", dump.time[0]); 3912 3913 /* Dump driver status (TX and RX rings) while we're here. */ 3914 printf("driver status:\n"); 3915 for (i = 0; i < sc->ntxqs; i++) { 3916 struct iwn_tx_ring *ring = &sc->txq[i]; 3917 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 3918 i, ring->qid, ring->cur, ring->queued); 3919 } 3920 printf(" rx ring: cur=%d\n", sc->rxq.cur); 3921 } 3922 3923 static void 3924 iwn_intr(void *arg) 3925 { 3926 struct iwn_softc *sc = arg; 3927 struct ifnet *ifp = sc->sc_ifp; 3928 uint32_t r1, r2, tmp; 3929 3930 IWN_LOCK(sc); 3931 3932 /* Disable interrupts. */ 3933 IWN_WRITE(sc, IWN_INT_MASK, 0); 3934 3935 /* Read interrupts from ICT (fast) or from registers (slow). */ 3936 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3937 tmp = 0; 3938 while (sc->ict[sc->ict_cur] != 0) { 3939 tmp |= sc->ict[sc->ict_cur]; 3940 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 3941 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 3942 } 3943 tmp = le32toh(tmp); 3944 if (tmp == 0xffffffff) /* Shouldn't happen. */ 3945 tmp = 0; 3946 else if (tmp & 0xc0000) /* Workaround a HW bug. 
*/ 3947 tmp |= 0x8000; 3948 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3949 r2 = 0; /* Unused. */ 3950 } else { 3951 r1 = IWN_READ(sc, IWN_INT); 3952 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3953 return; /* Hardware gone! */ 3954 r2 = IWN_READ(sc, IWN_FH_INT); 3955 } 3956 3957 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" 3958 , r1, r2); 3959 3960 if (r1 == 0 && r2 == 0) 3961 goto done; /* Interrupt not for us. */ 3962 3963 /* Acknowledge interrupts. */ 3964 IWN_WRITE(sc, IWN_INT, r1); 3965 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3966 IWN_WRITE(sc, IWN_FH_INT, r2); 3967 3968 if (r1 & IWN_INT_RF_TOGGLED) { 3969 iwn_rftoggle_intr(sc); 3970 goto done; 3971 } 3972 if (r1 & IWN_INT_CT_REACHED) { 3973 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 3974 __func__); 3975 } 3976 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3977 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 3978 __func__); 3979 #ifdef IWN_DEBUG 3980 iwn_debug_register(sc); 3981 #endif 3982 /* Dump firmware error log and stop. */ 3983 iwn_fatal_intr(sc); 3984 3985 taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task); 3986 goto done; 3987 } 3988 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3989 (r2 & IWN_FH_INT_RX)) { 3990 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3991 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3992 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3993 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3994 IWN_INT_PERIODIC_DIS); 3995 iwn_notif_intr(sc); 3996 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 3997 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3998 IWN_INT_PERIODIC_ENA); 3999 } 4000 } else 4001 iwn_notif_intr(sc); 4002 } 4003 4004 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 4005 if (sc->sc_flags & IWN_FLAG_USE_ICT) 4006 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 4007 wakeup(sc); /* FH DMA transfer completed. */ 4008 } 4009 4010 if (r1 & IWN_INT_ALIVE) 4011 wakeup(sc); /* Firmware is alive. 
*/ 4012 4013 if (r1 & IWN_INT_WAKEUP) 4014 iwn_wakeup_intr(sc); 4015 4016 done: 4017 /* Re-enable interrupts. */ 4018 if (ifp->if_flags & IFF_UP) 4019 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 4020 4021 IWN_UNLOCK(sc); 4022 } 4023 4024 /* 4025 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 4026 * 5000 adapters use a slightly different format). 4027 */ 4028 static void 4029 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 4030 uint16_t len) 4031 { 4032 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 4033 4034 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4035 4036 *w = htole16(len + 8); 4037 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4038 BUS_DMASYNC_PREWRITE); 4039 if (idx < IWN_SCHED_WINSZ) { 4040 *(w + IWN_TX_RING_COUNT) = *w; 4041 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4042 BUS_DMASYNC_PREWRITE); 4043 } 4044 } 4045 4046 static void 4047 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 4048 uint16_t len) 4049 { 4050 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 4051 4052 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4053 4054 *w = htole16(id << 12 | (len + 8)); 4055 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4056 BUS_DMASYNC_PREWRITE); 4057 if (idx < IWN_SCHED_WINSZ) { 4058 *(w + IWN_TX_RING_COUNT) = *w; 4059 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4060 BUS_DMASYNC_PREWRITE); 4061 } 4062 } 4063 4064 #ifdef notyet 4065 static void 4066 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 4067 { 4068 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 4069 4070 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4071 4072 *w = (*w & htole16(0xf000)) | htole16(1); 4073 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4074 BUS_DMASYNC_PREWRITE); 4075 if (idx < IWN_SCHED_WINSZ) { 4076 *(w + IWN_TX_RING_COUNT) = *w; 4077 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4078 
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif

/*
 * Check whether OFDM 11g protection will be enabled for the given rate.
 *
 * The original driver code only enabled protection for OFDM rates.
 * It didn't check to see whether it was operating in 11a or 11bg mode.
 */
static int
iwn_check_rate_needs_protection(struct iwn_softc *sc,
    struct ieee80211vap *vap, uint8_t rate)
{
	struct ieee80211com *ic = vap->iv_ic;

	/*
	 * Not in 2GHz mode? Then there's no need to enable OFDM
	 * 11bg protection.
	 */
	if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
		return (0);
	}

	/*
	 * 11bg protection not enabled? Then don't use it.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
		return (0);

	/*
	 * If it's an 11n rate, then for now we enable
	 * protection.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		return (1);
	}

	/*
	 * Do a rate table lookup. If the PHY is CCK,
	 * don't do protection.
	 */
	if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
		return (0);

	/*
	 * Yup, enable protection.
	 */
	return (1);
}

/*
 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
 * the link quality table that reflects this particular entry.
 */
static int
iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
	struct ieee80211_rateset *rs;
	int is_11n;
	int nr;
	int i;
	uint8_t cmp_rate;

	/*
	 * Figure out if we're using 11n or not here.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
		is_11n = 1;
	else
		is_11n = 0;

	/*
	 * Use the correct rate table.
	 */
	if (is_11n) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		nr = ni->ni_htrates.rs_nrates;
	} else {
		rs = &ni->ni_rates;
		nr = rs->rs_nrates;
	}

	/*
	 * Find the relevant link quality entry in the table.
	 */
	for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
		/*
		 * The link quality table index starts at 0 == highest
		 * rate, so we walk the rate table backwards.
		 */
		cmp_rate = rs->rs_rates[(nr - 1) - i];
		if (rate & IEEE80211_RATE_MCS)
			cmp_rate |= IEEE80211_RATE_MCS;

#if 0
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
		    __func__,
		    i,
		    nr,
		    rate,
		    cmp_rate);
#endif

		if (cmp_rate == rate)
			return (i);
	}

	/* Failed? Start at the end */
	return (IWN_MAX_TX_RETRIES - 1);
}

/*
 * Encapsulate and queue one 802.11 frame for transmission.
 * Returns 0 on success or an errno; consumes the mbuf on error.
 */
static int
iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct iwn_ops *ops = &sc->ops;
	const struct ieee80211_txparam *tp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_node *wn = (void *)ni;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	uint32_t flags;
	uint16_t qos;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint8_t tid, type;
	int ac, i, totlen, error, pad, nsegs = 0, rate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Select EDCA Access Category and TX ring for this frame.
	 */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}
	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		uint16_t seqno;
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap)) {
			m_freem(m);
			return EINVAL;
		}

		/*
		 * Queue this frame to the hardware ring that we've
		 * negotiated AMPDU TX on.
		 *
		 * Note that the sequence number must match the TX slot
		 * being used!
		 */
		ac = *(int *)tap->txa_private;
		seqno = ni->ni_txseqs[tid];
		*(uint16_t *)wh->i_seq =
		    htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		ring = &sc->txq[ac];
		/* Diagnostic only: warn when seqno and ring slot diverge. */
		if ((seqno % 256) != ring->cur) {
			device_printf(sc->sc_dev,
			    "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
			    __func__,
			    m,
			    seqno,
			    seqno % 256,
			    ring->cur);
		}
		ni->ni_txseqs[tid]++;
	}
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate index. */
	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
	if (type == IEEE80211_FC0_TYPE_MGT)
		rate = tp->mgmtrate;
	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
		rate = tp->mcastrate;
	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rate = tp->ucastrate;
	else if (m->m_flags & M_EAPOL)
		rate = tp->mgmtrate;
	else {
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		rate = ni->ni_txrate;
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		ieee80211_radiotap_tx(vap, m);
	}

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* Unicast frame, check if an ACK is expected. */
		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
		    IEEE80211_QOS_ACKPOLICY_NOACK)
			flags |= IWN_TX_NEED_ACK;
	}
	if ((wh->i_fc[0] &
	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */

	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */

	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* NB: Group frames are sent using CCK in 802.11b/g. */
		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
			flags |= IWN_TX_NEED_RTS;
		} else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				flags |= IWN_TX_NEED_CTS;
			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
				flags |= IWN_TX_NEED_RTS;
		}

		/* XXX HT protection? */

		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
				/* 5000 autoselects RTS/CTS or CTS-to-self. */
				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
				flags |= IWN_TX_NEED_PROTECTION;
			} else
				flags |= IWN_TX_FULL_TXOP;
		}
	}

	/* Group/management frames go to the broadcast station entry. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->id = sc->broadcast_id;
	else
		tx->id = wn->id;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->len = htole16(totlen);
	tx->tid = tid;
	/* Fixed RTS/data retry budgets handed to the firmware. */
	tx->rts_ntries = 60;
	tx->data_ntries = 15;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
	if (tx->id == sc->broadcast_id) {
		/* Group or management frame. */
		tx->linkq = 0;
	} else {
		tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
		flags |= IWN_TX_LINKQ;	/* enable MRR */
	}

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: qid %d idx %d len %d nsegs %d rate %04x plcp 0x%08x\n",
	    __func__,
	    ring->qid,
	    ring->cur,
	    m->m_pkthdr.len,
	    nsegs,
	    rate,
	    tx->rate);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Transmit a frame using caller-supplied bpf parameters (rate, retry
 * counts, protection flags) instead of the normal rate-control path.
 * The mbuf is consumed.  Must be called with the softc lock held.
 */
static int
iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
	struct iwn_ops *ops = &sc->ops;
//	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211vap *vap = ni->ni_vap;
//	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct mbuf *m1;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint32_t flags;
	u_int hdrlen;
	int ac, totlen, error, pad, nsegs = 0, i, rate;
	uint8_t type;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* TX ring is picked straight from the caller-supplied priority. */
	ac = params->ibp_pri & 3;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate. */
	rate = params->ibp_rate0;
	totlen = m->m_pkthdr.len;

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	/* Translate bpf parameter flags into firmware TX flags. */
	flags = 0;
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
		flags |= IWN_TX_NEED_ACK;
	if (params->ibp_flags & IEEE80211_BPF_RTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_RTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
	}
	if (params->ibp_flags & IEEE80211_BPF_CTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_CTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
	}
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;

		ieee80211_radiotap_tx(vap, m);
	}

	tx->len = htole16(totlen);
	tx->tid = 0;
	tx->id = sc->broadcast_id;
	/* Retry budgets come straight from the bpf parameters. */
	tx->rts_ntries = params->ibp_try1;
	tx->data_ntries = params->ibp_try0;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);

	/* Group or management frame. */
	tx->linkq = 0;

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold.
	 */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * net80211 raw transmit entry point.  Dispatches to iwn_tx_data() when
 * no bpf parameters are supplied, otherwise to iwn_tx_data_raw().
 * The node reference (and mbuf, inside the tx routines) is released on
 * failure.
 */
static int
iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}

	IWN_LOCK(sc);
	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		error = iwn_tx_data(sc, m, ni);
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		error = iwn_tx_data_raw(sc, m, ni, params);
	}
	if (error != 0) {
		/* NB: m is reclaimed on tx failure */
		ieee80211_free_node(ni);
		ifp->if_oerrors++;
	}
	/*
	 * NOTE(review): the watchdog timer is armed even when the TX
	 * attempt above failed — confirm this is intended.
	 */
	sc->sc_tx_timer = 5;

	IWN_UNLOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/* ifnet start callback: take the lock and drain the send queue. */
static void
iwn_start(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;

	IWN_LOCK(sc);
	iwn_start_locked(ifp);
	IWN_UNLOCK(sc);
}

/*
 * Dequeue frames from the interface send queue and hand them to
 * iwn_tx_data() until the queue is empty or a TX ring fills up.
 * Must be called with the softc lock held.
 */
static void
iwn_start_locked(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *m;

	IWN_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
		return;

	for (;;) {
		/* Stop when any TX ring is above its high-water mark. */
		if (sc->qfullmsk != 0) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwn_tx_data(sc, m, ni) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}
		sc->sc_tx_timer = 5;
	}
}

/*
 * Per-second watchdog callout: fires the reinit task if a queued
 * transmission has not completed before sc_tx_timer counts down.
 */
static void
iwn_watchdog(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	IWN_LOCK_ASSERT(sc);

	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			ieee80211_runtask(ic, &sc->sc_reinit_task);
			/* Reinit path restarts the watchdog itself. */
			return;
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
}

/*
 * ifnet ioctl handler: interface flags, media, and the driver-private
 * statistics get/zero requests.
 */
static int
iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0, startall = 0, stop = 0;

	switch (cmd) {
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		IWN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				iwn_init_locked(sc);
				/*
				 * NOTE(review): looks like the RFKILL bit
				 * being set here means the radio is enabled
				 * (start) and clear means kill switch is on
				 * (stop) — confirm against if_iwnreg.h.
				 */
				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
					startall = 1;
				else
					stop = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				iwn_stop_locked(sc);
		}
		IWN_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		else if (vap != NULL && stop)
			ieee80211_stop(vap);
		break;
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIWNSTATS:
		IWN_LOCK(sc);
		/* XXX validate permissions/memory/etc?
		 */
		error = copyout(&sc->last_stat, ifr->ifr_data,
		    sizeof(struct iwn_stats));
		IWN_UNLOCK(sc);
		break;
	case SIOCZIWNSTATS:
		/* Zero the cached firmware statistics. */
		IWN_LOCK(sc);
		memset(&sc->last_stat, 0, sizeof(struct iwn_stats));
		IWN_UNLOCK(sc);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Send a command to the firmware.
 *
 * Small commands are copied into the pre-mapped per-slot command buffer;
 * larger ones are staged in a freshly allocated jumbo mbuf that is
 * DMA-loaded for this transfer.  When 'async' is zero the caller sleeps
 * (up to 1s) until the firmware acknowledges the command.
 */
static int
iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
{
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	int totlen, error;
	int cmd_queue_num;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (async == 0)
		IWN_LOCK_ASSERT(sc);

	/* PAN-capable firmware uses a different command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		cmd_queue_num = IWN_PAN_CMD_QUEUE;
	else
		cmd_queue_num = IWN_CMD_QUEUE_NUM;

	ring = &sc->txq[cmd_queue_num];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];
	totlen = 4 + size;	/* 4-byte command header + payload */

	if (size > sizeof cmd->data) {
		/* Command is too large to fit in a descriptor.
	 */
	src = (caddr_t)node;
	dst = (caddr_t)&hnode;
	memcpy(dst, src, 48);
	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
	memcpy(dst + 48, src + 72, 20);
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
}

static int
iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Direct mapping. */
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}

/*
 * Program the firmware link-quality (multi-rate-retry) table for a
 * station: antenna masks, A-MPDU limits and IWN_MAX_TX_RETRIES retry
 * rates, walking down from the node's highest available rate.
 */
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
#define RV(v)	((v) & IEEE80211_RATE_VAL)
	struct iwn_node *wn = (void *)ni;
	struct ieee80211_rateset *rs;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, rate, txrate;
	int is_11n;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = wn->id;
	linkq.antmsk_1stream = txant;

	/*
	 * The '2 stream' setup is a bit .. odd.
	 *
	 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
	 * the firmware panics (eg Intel 5100.)
	 *
	 * For NICs that support two antennas, we use ANT_AB.
	 *
	 * For NICs that support three antennas, we use the two that
	 * wasn't the default one.
	 *
	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
	 * this to only one antenna.
	 */

	/* So - if there's no secondary antenna, assume IWN_ANT_AB */

	/* Default - transmit on the other antennas */
	linkq.antmsk_2stream = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));

	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
	if (linkq.antmsk_2stream == 0)
		linkq.antmsk_2stream = IWN_ANT_AB;

	/*
	 * If the NIC is a two-stream TX NIC, configure the TX mask to
	 * the default chainmask
	 */
	else if (sc->ntxchains == 2)
		linkq.antmsk_2stream = sc->txchainmask;

	linkq.ampdu_max = 32;		/* XXX negotiated? */
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
	    __func__,
	    linkq.antmsk_1stream,
	    linkq.antmsk_2stream,
	    sc->ntxchains);

	/*
	 * Are we using 11n rates? Ensure the channel is
	 * 11n _and_ we have some 11n rates, or don't
	 * try.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		is_11n = 1;
	} else {
		rs = &ni->ni_rates;
		is_11n = 0;
	}

	/* Start at highest available bit-rate. */
	/*
	 * XXX this is all very dirty!
	 */
	if (is_11n)
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		uint32_t plcp;

		if (is_11n)
			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
		else
			rate = RV(rs->rs_rates[txrate]);

		DPRINTF(sc, IWN_DEBUG_XMIT,
		    "%s: i=%d, txrate=%d, rate=0x%02x\n",
		    __func__,
		    i,
		    txrate,
		    rate);

		/* Do rate -> PLCP config mapping */
		plcp = iwn_rate_to_plcp(sc, ni, rate);
		linkq.retry[i] = plcp;

		/*
		 * The mimo field is an index into the table which
		 * indicates the first index where it and subsequent entries
		 * will not be using MIMO.
		 *
		 * Since we're filling linkq from 0..15 and we're filling
		 * from the higest MCS rates to the lowest rates, if we
		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
		 * the next entry.)  That way if the next entry is a non-MIMO
		 * entry, we're already pointing at it.
		 */
		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
		    RV(le32toh(plcp)) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
#undef RV
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		linkq.retry[0] = htole32(0xd);
	else
		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
	/* Use same bit-rate for all TX retries. */
	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
		linkq.retry[i] = linkq.retry[0];
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}

/*
 * net80211 WME update callback: push the current EDCA (AIFS/CW/TXOP)
 * parameters for all four access categories to the firmware.  Drops
 * the com lock around the (async) firmware command.
 */
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	struct iwn_edca_params cmd;
	int aci;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = htole32(IWN_EDCA_UPDATE);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac =
		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
		cmd.ac[aci].aifsn = ac->wmep_aifsn;
		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
		cmd.ac[aci].txoplimit =
		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
	}
	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
	IWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
#undef IWN_EXP2
}

static void
iwn_update_mcast(struct ifnet *ifp)
{
	/* Ignore */
}

/*
 * Program the LED blink pattern via a firmware command; 'off'/'on' are
 * durations in units of 100ms (see led.unit below).
 */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct iwn_cmd_led led;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

#if 0
	/* XXX don't set LEDs during scan? */
	if (sc->sc_is_scanning)
		return;
#endif

	/* Clear microcode LED ownership. */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
 */
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
	struct iwn_critical_temp crit;
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);

	/* Threshold units/scale are chip-generation specific. */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		temp = IWN_CTOK(110);
	else
		temp = 110;
	memset(&crit, 0, sizeof crit);
	crit.tempR = htole32(temp);
	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}

/*
 * Send association timing (beacon interval, listen interval and time to
 * next beacon, derived from the node's TSF timestamp) to the firmware.
 */
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon.
	 */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/*
 * 4965: re-run TX power calibration when the chip temperature has
 * drifted by 3 degrees or more since the last calibration.
 */
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Adjust TX power if need be (delta >= 3 degC). */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
	    __func__, sc->temp, temp);
	if (abs(temp - sc->temp) >= 3) {
		/* Record temperature of last calibration. */
		sc->temp = temp;
		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
	}
}

/*
 * Set TX power for current channel (each rate has its own power settings).
 * This function takes into account the regulatory information from EEPROM,
 * the current temperature and the current voltage.
 */
static int
iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
	struct iwn_ucode_info *uc = &sc->ucode_info;
	struct iwn4965_cmd_txpower cmd;
	struct iwn4965_eeprom_chan_samples *chans;
	const uint8_t *rf_gain, *dsp_gain;
	int32_t vdiff, tdiff;
	int i, c, grp, maxpwr;
	uint8_t chan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Retrieve current channel from last RXON. */
	chan = sc->rxon->chan;
	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
	    chan);

	memset(&cmd, 0, sizeof cmd);
	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
	cmd.chan = chan;

	/* Pick per-band max power and gain tables. */
	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
		maxpwr = sc->maxpwr5GHz;
		rf_gain = iwn4965_rf_gain_5ghz;
		dsp_gain = iwn4965_dsp_gain_5ghz;
	} else {
		maxpwr = sc->maxpwr2GHz;
		rf_gain = iwn4965_rf_gain_2ghz;
		dsp_gain = iwn4965_dsp_gain_2ghz;
	}

	/* Compute voltage compensation. */
	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
	if (vdiff > 0)
		vdiff *= 2;
	if (abs(vdiff) > 2)
		vdiff = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);

	/* Get channel attenuation group. */
	if (chan <= 20)		/* 1-20 */
		grp = 4;
	else if (chan <= 43)	/* 34-43 */
		grp = 0;
	else if (chan <= 70)	/* 44-70 */
		grp = 1;
	else if (chan <= 124)	/* 71-124 */
		grp = 2;
	else			/* 125-200 */
		grp = 3;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);

	/* Get channel sub-band. */
	for (i = 0; i < IWN_NBANDS; i++)
		if (sc->bands[i].lo != 0 &&
		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
			break;
	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
		return EINVAL;
	chans = sc->bands[i].chans;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d sub-band=%d\n", __func__, chan, i);

	/* Two TX chains: interpolate EEPROM samples for each. */
	for (c = 0; c < 2; c++) {
		uint8_t power, gain, temp;
		int maxchpwr, pwr, ridx, idx;

		power = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].power,
		    chans[1].num, chans[1].samples[c][1].power, 1);
		gain = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].gain,
		    chans[1].num, chans[1].samples[c][1].gain, 1);
		temp = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].temp,
		    chans[1].num, chans[1].samples[c][1].temp, 1);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
		    __func__, c, power, gain, temp);

		/* Compute temperature compensation. */
		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
		    __func__, tdiff, sc->temp, temp);

		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			/* Convert dBm to half-dBm. */
			maxchpwr = sc->maxpwr[chan] * 2;
			if ((ridx / 8) & 1)
				maxchpwr -= 6;	/* MIMO 2T: -3dB */

			pwr = maxpwr;

			/* Adjust TX power based on rate. */
			if ((ridx % 8) == 5)
				pwr -= 15;	/* OFDM48: -7.5dB */
			else if ((ridx % 8) == 6)
				pwr -= 17;	/* OFDM54: -8.5dB */
			else if ((ridx % 8) == 7)
				pwr -= 20;	/* OFDM60: -10dB */
			else
				pwr -= 10;	/* Others: -5dB */

			/* Do not exceed channel max TX power. */
			if (pwr > maxchpwr)
				pwr = maxchpwr;

			idx = gain - (pwr - power) - tdiff - vdiff;
			if ((ridx / 8) & 1)	/* MIMO */
				idx += (int32_t)le32toh(uc->atten[grp][c]);

			if (cmd.band == 0)
				idx += 9;	/* 5GHz */
			if (ridx == IWN_RIDX_MAX)
				idx += 5;	/* CCK */

			/* Make sure idx stays in a valid range. */
			if (idx < 0)
				idx = 0;
			else if (idx > IWN4965_MAX_PWR_INDEX)
				idx = IWN4965_MAX_PWR_INDEX;

			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
			    "%s: Tx chain %d, rate idx %d: power=%d\n",
			    __func__, c, ridx, idx);
			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
		}
	}

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: set tx power for chan %d\n", __func__, chan);
	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);

#undef interpolate
#undef fdivround
}

static int
iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
	struct iwn5000_cmd_txpower cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * TX power calibration is handled automatically by the firmware
	 * for 5000 Series.
	 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Antenna mask and AGC value are packed in the PHY statistics. */
	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc = (le16toh(phy->agc) >> 7) & 0x7f;

	/* Take the strongest reading among the reported antennas. */
	rssi = 0;
	if (mask & IWN_ANT_A)
		rssi = MAX(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)
		rssi = MAX(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)
		rssi = MAX(rssi, phy->rssi[4]);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers (>=5000 Series).
 */
static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	/* Strongest reading among the three receivers. */
	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
	    le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		/* Skip antennas that report no noise measurement. */
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* R1-R3 are calibration constants from the ucode, R4 is raw. */
	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
	/* Compute temperature in Kelvin. */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.
	 */
	temp = le32toh(sc->rawtemp);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* 5150 reports a raw value that needs offset/scaling. */
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
 */
static int
iwn_init_sensitivity(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t flags;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Reset calibration state machine. */
	memset(calib, 0, sizeof (*calib));
	calib->state = IWN_CALIB_STATE_INIT;
	calib->cck_state = IWN_CCK_STATE_HIFA;
	/* Set initial correlation values.
 */
	calib->ofdm_x1 = sc->limits->min_ofdm_x1;
	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
	calib->ofdm_x4 = sc->limits->min_ofdm_x4;
	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
	calib->cck_x4 = 125;
	calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
	calib->energy_cck = sc->limits->energy_cck;

	/* Write initial sensitivity. */
	if ((error = iwn_send_sensitivity(sc)) != 0)
		return error;

	/* Write initial gains. */
	if ((error = ops->init_gains(sc)) != 0)
		return error;

	/* Request statistics at each beacon interval. */
	flags = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}

/*
 * Collect noise and RSSI statistics for the first 20 beacons received
 * after association and use them to determine connected antennas and
 * to set differential gains.
 */
static void
iwn_collect_noise(struct iwn_softc *sc,
    const struct iwn_rx_general_stats *stats)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Accumulate RSSI and noise for all 3 antennas. */
	for (i = 0; i < 3; i++) {
		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
	}
	/* NB: We update differential gains only once after 20 beacons. */
	if (++calib->nbeacons < 20)
		return;

	/* Determine highest average RSSI. */
	val = MAX(calib->rssi[0], calib->rssi[1]);
	val = MAX(calib->rssi[2], val);

	/* Determine which antennas are connected. */
	sc->chainmask = sc->rxchainmask;
	for (i = 0; i < 3; i++)
		/* NB: threshold is 15 units averaged over the 20 beacons. */
		if (val - calib->rssi[i] > 15 * 20)
			sc->chainmask &= ~(1 << i);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
	    __func__, sc->rxchainmask, sc->chainmask);

	/* If none of the TX antennas are connected, keep at least one. */
	if ((sc->chainmask & sc->txchainmask) == 0)
		sc->chainmask |= IWN_LSB(sc->txchainmask);

	(void)ops->set_gains(sc);
	calib->state = IWN_CALIB_STATE_RUN;

#ifdef notyet
	/* XXX Disable RX chains with no antennas connected. */
	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
#endif

	/* Enable power-saving mode if requested by user. */
	if (ic->ic_flags & IEEE80211_F_PMGTON)
		(void)iwn_set_pslevel(sc, 0, 3, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Send the initial differential gain calibration for 4965 devices.
 */
static int
iwn4965_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib_gain cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Differential gains initially set to 0 for all 3 antennas.
 */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Reset differential gain calibration on >=5000 Series (all-zero gains;
 * the command payload is zeroed by the memset below).
 */
static int
iwn5000_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->reset_noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Set per-antenna differential gains on 4965, based on the noise
 * statistics accumulated by iwn_collect_noise().
 */
static int
iwn4965_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, delta, noise;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Get minimal noise among connected antennas. */
	noise = INT_MAX;	/* NB: There's at least one antenna. */
	for (i = 0; i < 3; i++)
		if (sc->chainmask & (1 << i))
			noise = MIN(calib->noise[i], noise);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Set differential gains for connected antennas. */
	for (i = 0; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* Compute attenuation (in unit of 1.5dB). */
			delta = (noise - (int32_t)calib->noise[i]) / 30;
			/* NB: delta <= 0 */
			/* Limit to [-4.5dB,0]. */
			cmd.gain[i] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Set per-antenna differential gains on >=5000 Series, relative to the
 * first available RX antenna.
 */
static int
iwn5000_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, ant, div, delta;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Get first available RX antenna as referential. */
	ant = IWN_LSB(sc->rxchainmask);
	/* Set differential gains for other antennas. */
	for (i = ant + 1; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* The delta is relative to antenna "ant". */
			delta = ((int32_t)calib->noise[ant] -
			    (int32_t)calib->noise[i]) / div;
			/* Limit to [-4.5dB,+4.5dB]. */
			cmd.gain[i - 1] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant B/C: %x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Tune RF RX sensitivity based on the number of false alarms detected
 * during the last beacon period.
 */
static void
iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
{
/* Bump "val" by "inc", saturating at "max", and flag an update. */
#define inc(val, inc, max)			\
	if ((val) < (max)) {			\
		if ((val) < (max) - (inc))	\
			(val) += (inc);		\
		else				\
			(val) = (max);		\
		needs_update = 1;		\
	}
/* Lower "val" by "dec", saturating at "min", and flag an update. */
#define dec(val, dec, min)			\
	if ((val) > (min)) {			\
		if ((val) > (min) + (dec))	\
			(val) -= (dec);		\
		else				\
			(val) = (min);		\
		needs_update = 1;		\
	}

	const struct iwn_sensitivity_limits *limits = sc->limits;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t val, rxena, fa;
	uint32_t energy[3], energy_min;
	uint8_t noise[3], noise_ref;
	int i, needs_update = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check that we've been enabled long enough. */
	if ((rxena = le32toh(stats->general.load)) == 0) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
		return;
	}

	/* Compute number of false alarms since last call for OFDM. */
	fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
		inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
		inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
		dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
		dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
	}

	/* Compute maximum noise among 3 receivers. */
	for (i = 0; i < 3; i++)
		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
	val = MAX(noise[0], noise[1]);
	val = MAX(noise[2], val);
	/* Insert it into our samples table. */
	calib->noise_samples[calib->cur_noise_sample] = val;
	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;

	/* Compute maximum noise among last 20 samples. */
	noise_ref = calib->noise_samples[0];
	for (i = 1; i < 20; i++)
		noise_ref = MAX(noise_ref, calib->noise_samples[i]);

	/*
	 * Compute maximum energy among 3 receivers.
	 * NOTE(review): MIN/MAX below do not match the comment wording;
	 * the raw energy values appear to be inverted (smaller value =
	 * stronger energy), matching upstream iwlwifi -- confirm there
	 * before "fixing".
	 */
	for (i = 0; i < 3; i++)
		energy[i] = le32toh(stats->general.energy[i]);
	val = MIN(energy[0], energy[1]);
	val = MIN(energy[2], val);
	/* Insert it into our samples table. */
	calib->energy_samples[calib->cur_energy_sample] = val;
	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;

	/* Compute minimum energy among last 10 samples. */
	energy_min = calib->energy_samples[0];
	for (i = 1; i < 10; i++)
		energy_min = MAX(energy_min, calib->energy_samples[i]);
	energy_min += 6;

	/* Compute number of false alarms since last call for CCK. */
	fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
	fa += le32toh(stats->cck.fa) - calib->fa_cck;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK high false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_HIFA;
		calib->low_fa = 0;

		if (calib->cck_x4 > 160) {
			calib->noise_ref = noise_ref;
			if (calib->energy_cck > 2)
				dec(calib->energy_cck, 2, energy_min);
		}
		if (calib->cck_x4 < 160) {
			calib->cck_x4 = 161;
			needs_update = 1;
		} else
			inc(calib->cck_x4, 3, limits->max_cck_x4);

		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK low false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_LOFA;
		calib->low_fa++;

		/*
		 * NOTE(review): cck_state was just set to LOFA above, so
		 * the first test below is always true (inherited as-is
		 * from iwlwifi) -- confirm before simplifying.
		 */
		if (calib->cck_state != IWN_CCK_STATE_INIT &&
		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
		    calib->low_fa > 100)) {
			inc(calib->energy_cck, 2, limits->min_energy_cck);
			dec(calib->cck_x4, 3, limits->min_cck_x4);
			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
		}
	} else {
		/* Not worth to increase or decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
		calib->low_fa = 0;
		calib->noise_ref = noise_ref;

		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
			/* Previous interval had many false alarms. */
			dec(calib->energy_cck, 8, energy_min);
		}
		calib->cck_state = IWN_CCK_STATE_INIT;
	}

	if (needs_update)
		(void)iwn_send_sensitivity(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#undef dec
#undef inc
}

/*
 * Push the current sensitivity calibration state to the firmware.
 */
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_enhanced_sensitivity_cmd cmd;
	int len;

	memset(&cmd, 0, sizeof cmd);
	/* Default to the basic command; extended below if supported. */
	len = sizeof (struct iwn_sensitivity_cmd);
	cmd.which = IWN_SENSITIVITY_WORKTBL;
	/* OFDM modulation. */
	cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
	cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
	cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
	cmd.energy_ofdm_th = htole16(62);
	/* CCK modulation. */
	cmd.corr_cck_x4 = htole16(calib->cck_x4);
	cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
	cmd.energy_cck = htole16(calib->energy_cck);
	/* Barker modulation: use default values. */
	cmd.corr_barker = htole16(190);
	cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
	    calib->ofdm_mrc_x4, calib->cck_x4,
	    calib->cck_mrc_x4, calib->energy_cck);

	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
		goto send;
	/* Enhanced sensitivity settings.
 */
	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
	cmd.ofdm_det_slope_mrc = htole16(668);
	cmd.ofdm_det_icept_mrc = htole16(4);
	cmd.ofdm_det_slope = htole16(486);
	cmd.ofdm_det_icept = htole16(37);
	cmd.cck_det_slope_mrc = htole16(853);
	cmd.cck_det_icept_mrc = htole16(4);
	cmd.cck_det_slope = htole16(476);
	cmd.cck_det_icept = htole16(99);
send:
	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
}

/*
 * Look at the increase of PLCP errors over time; if it exceeds
 * a programmed threshold then trigger an RF retune.
 */
static void
iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs)
{
	int32_t delta_ofdm, delta_ht, delta_cck;
	struct iwn_calib_state *calib = &sc->calib;
	int delta_ticks, cur_ticks;
	int delta_msec;
	int thresh;

	/*
	 * Calculate the difference between the current and
	 * previous statistics.
	 */
	delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck;
	delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht;

	/*
	 * Calculate the delta in time between successive statistics
	 * messages. Yes, it can roll over; so we make sure that
	 * this doesn't happen.
	 *
	 * XXX go figure out what to do about rollover
	 * XXX go figure out what to do if ticks rolls over to -ve instead!
	 * XXX go stab signed integer overflow undefined-ness in the face.
	 */
	cur_ticks = ticks;
	delta_ticks = cur_ticks - sc->last_calib_ticks;

	/*
	 * If any are negative, then the firmware likely reset; so just
	 * bail. We'll pick this up next time.
	 */
	if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0)
		return;

	/*
	 * delta_ticks is in ticks; we need to convert it up to milliseconds
	 * so we can do some useful math with it.
	 */
	delta_msec = ticks_to_msecs(delta_ticks);

	/*
	 * Calculate what our threshold is given the current delta_msec.
	 */
	thresh = sc->base_params->plcp_err_threshold * delta_msec;

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n",
	    __func__,
	    delta_msec,
	    delta_cck,
	    delta_ofdm,
	    delta_ht,
	    (delta_msec + delta_cck + delta_ofdm + delta_ht),
	    thresh);

	/*
	 * If we need a retune, then schedule a single channel scan
	 * to a channel that isn't the currently active one!
	 *
	 * The math from linux iwlwifi:
	 *
	 * if ((delta * 100 / msecs) > threshold)
	 */
	if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) {
		/* NB: the retune itself is only logged, not yet implemented. */
		DPRINTF(sc, IWN_DEBUG_ANY,
		    "%s: PLCP error threshold raw (%d) comparison (%d) "
		    "over limit (%d); retune!\n",
		    __func__,
		    (delta_cck + delta_ofdm + delta_ht),
		    (delta_cck + delta_ofdm + delta_ht) * 100,
		    thresh);
	}
}

/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 */
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
	struct iwn_pmgt_cmd cmd;
	const struct iwn_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__,
	    dtim,
	    level,
	    async);

	/* Select which PS parameters to use.
 */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	if (!(reg & 0x1))	/* L0s Entry disabled. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
	/* Timeouts are given in units of 1024us (microseconds). */
	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);

	if (dtim == 0) {
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;
	if (skip_dtim != 0) {
		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			max = (max / dtim) * dtim;
	} else
		max = dtim;
	/* Clamp each sleep interval to the computed maximum. */
	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
	    level);
	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/*
 * Configure simple (legacy) bluetooth coexistence.
 */
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
	struct iwn_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}

/*
 * Configure advanced (3-wire) bluetooth coexistence: main config,
 * priority table and protection commands, mirroring linux iwlwifi.
 */
static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
	static const uint32_t btcoex_3wire[12] = {
		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
	};
	struct iwn6000_btcoex_config btconfig;
	struct iwn2000_btcoex_config btconfig2k;
	struct iwn_btcoex_priotable btprio;
	struct iwn_btcoex_prot btprot;
	int error, i;
	uint8_t flags;

	memset(&btconfig, 0, sizeof btconfig);
	memset(&btconfig2k, 0, sizeof btconfig2k);

	flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
	    IWN_BT_FLAG_COEX6000_MODE_SHIFT;	// Done as is in linux kernel 3.2

	if (sc->base_params->bt_sco_disable)
		flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
	else
		flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;

	flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;

	/* Default flags result is 145 as old value */

	/*
	 * NOTE: flags value needs review; values must change if we
	 * wish to disable it.
	 */
	if (sc->base_params->bt_session_2) {
		/* Newer (session 2) devices use the 2000-series layout. */
		btconfig2k.flags = flags;
		btconfig2k.max_kill = 5;
		btconfig2k.bt3_t7_timer = 1;
		btconfig2k.kill_ack = htole32(0xffff0000);
		btconfig2k.kill_cts = htole32(0xffff0000);
		btconfig2k.sample_time = 2;
		btconfig2k.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig2k.valid = htole16(0xff);
		btconfig2k.prio_boost = htole32(0xf0);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence"
		    " session 2, flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
		    sizeof(btconfig2k), 1);
	} else {
		btconfig.flags = flags;
		btconfig.max_kill = 5;
		btconfig.bt3_t7_timer = 1;
		btconfig.kill_ack = htole32(0xffff0000);
		btconfig.kill_cts = htole32(0xffff0000);
		btconfig.sample_time = 2;
		btconfig.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig.valid = htole16(0xff);
		btconfig.prio_boost = 0xf0;
		DPRINTF(sc,
 IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence,"
		    " flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
		    sizeof(btconfig), 1);
	}

	if (error != 0)
		return error;

	/* Program the BT priority table. */
	memset(&btprio, 0, sizeof btprio);
	btprio.calib_init1 = 0x6;
	btprio.calib_init2 = 0x7;
	btprio.calib_periodic_low1 = 0x2;
	btprio.calib_periodic_low2 = 0x3;
	btprio.calib_periodic_high1 = 0x4;
	btprio.calib_periodic_high2 = 0x5;
	btprio.dtim = 0x6;
	btprio.scan52 = 0x8;
	btprio.scan24 = 0xa;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
	    1);
	if (error != 0)
		return error;

	/* Force BT state machine change. */
	memset(&btprot, 0, sizeof btprot);
	btprot.open = 1;
	btprot.type = 1;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
	if (error != 0)
		return error;
	/* Open then immediately close the protection window. */
	btprot.open = 0;
	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
}

/*
 * Enable runtime DC calibration in the firmware (used on 6050).
 */
static int
iwn5000_runtime_calib(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start = IWN5000_CALIB_DC;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: configuring runtime calibration\n", __func__);
	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
}

/*
 * Bring the adapter to an operational state: temperature/DC calibration,
 * TX chain configuration, bluetooth coexistence, RXON, broadcast node,
 * TX power, critical temperature and power saving level.
 */
static int
iwn_config(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t txmask;
	uint16_t rxchain;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* The two temperature offset calibration flavors are exclusive. */
	if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
	    && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
		device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
		    " exclusive each together. Review NIC config file. Conf"
		    " : 0x%08x Flags : 0x%08x \n", __func__,
		    sc->base_params->calib_need,
		    (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
		    IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
		return (EINVAL);
	}

	/* Compute temperature calib if needed. Will be send by send calib */
	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return (error);
		}
	} else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		error = iwn5000_temp_offset_calibv2(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not compute temperature offset v2\n",
			    __func__);
			return (error);
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	error = 0;

	/* Configure bluetooth coexistence if needed. */
	/* NB: bt_mode matches at most one of the two cases below. */
	if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
		error = iwn_send_advanced_btcoex(sc);
	if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
		error = iwn_send_btcoex(sc);

	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX. */
	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
	IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon->mode = IWN_MODE_STA;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon->mode = IWN_MODE_MONITOR;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
		break;
	default:
		/* Should not get there. */
		break;
	}
	sc->rxon->cck_mask = 0x0f;	/* not yet negotiated */
	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon->ht_single_mask = 0xff;
	sc->rxon->ht_dual_mask = 0xff;
	sc->rxon->ht_triple_mask = 0xff;
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(2) |
	    IWN_RXCHAIN_IDLE_COUNT(2);
	sc->rxon->rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed\n",
		    __func__);
		return error;
	}

	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
		    __func__);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not set TX power\n",
		    __func__);
		return error;
	}

	if ((error = iwn_set_critical_temp(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set critical temperature\n", __func__);
		return error;
	}

	/* Set power saving level to CAM during initialization. */
	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Add an ssid element to a frame.
 */
static uint8_t *
ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
{
	/*
	 * Local copy of the net80211 helper: two-byte IE header followed
	 * by "len" bytes of SSID.  The caller must guarantee the buffer
	 * has room for len + 2 bytes; no bounds check is done here.
	 */
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = len;
	memcpy(frm, ssid, len);
	return frm + len;
}

/*
 * Compute the active-scan dwell time (ms) for channel "c", scaled by the
 * number of probe requests to send.
 */
static uint16_t
iwn_get_active_dwell_time(struct iwn_softc *sc,
    struct ieee80211_channel *c, uint8_t n_probes)
{
	/* No channel? Default to 2GHz settings */
	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
		    IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
	}

	/* 5GHz dwell time */
	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
}

/*
 * Limit the total dwell time to 85% of the beacon interval.
 *
 * Returns the dwell time in milliseconds.
 */
static uint16_t
iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211vap *vap = NULL;
	int bintval = 0;

	/* bintval is in TU (1.024mS) */
	/* Only the first vap's BSS beacon interval is consulted. */
	if (! TAILQ_EMPTY(&ic->ic_vaps)) {
		vap = TAILQ_FIRST(&ic->ic_vaps);
		bintval = vap->iv_bss->ni_intval;
	}

	/*
	 * If it's non-zero, we should calculate the minimum of
	 * it and the DWELL_BASE.
	 *
	 * XXX Yes, the math should take into account that bintval
	 * is 1.024mS, not 1mS..
	 */
	if (bintval > 0) {
		DPRINTF(sc, IWN_DEBUG_SCAN,
		    "%s: bintval=%d\n",
		    __func__,
		    bintval);
		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
	}

	/* No association context? Default */
	return (IWN_PASSIVE_DWELL_BASE);
}

/*
 * Compute the passive-scan dwell time (ms) for channel "c", clamped by
 * iwn_limit_dwell() when a beacon interval is known.
 */
static uint16_t
iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	uint16_t passive;

	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
	} else {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
	}

	/* Clamp to the beacon interval if we're associated */
	return (iwn_limit_dwell(sc, passive));
}

/*
 * Build and send a firmware SCAN command for a single channel.  The "c"
 * argument is ignored and overwritten with ic->ic_curchan below.
 *
 * NOTE(review): "ss" is NULL-checked once when copying the SSID but is
 * dereferenced unconditionally further down (ss->ss_nssid) -- confirm
 * callers always pass a valid scan state.
 */
static int
iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap,
    struct ieee80211_scan_state *ss, struct ieee80211_channel *c)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_scan_hdr *hdr;
	struct iwn_cmd_data *tx;
	struct iwn_scan_essid *essid;
	struct iwn_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	uint8_t *buf, *frm;
	uint16_t rxchain;
	uint8_t txant;
	int buflen, error;
	int is_active;
	uint16_t dwell_active, dwell_passive;
	uint32_t extra, scan_service_time;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/*
	 * We are absolutely not allowed to send a scan command when another
	 * scan command is pending.
6523 */ 6524 if (sc->sc_is_scanning) { 6525 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 6526 __func__); 6527 return (EAGAIN); 6528 } 6529 6530 /* Assign the scan channel */ 6531 c = ic->ic_curchan; 6532 6533 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6534 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 6535 if (buf == NULL) { 6536 device_printf(sc->sc_dev, 6537 "%s: could not allocate buffer for scan command\n", 6538 __func__); 6539 return ENOMEM; 6540 } 6541 hdr = (struct iwn_scan_hdr *)buf; 6542 /* 6543 * Move to the next channel if no frames are received within 10ms 6544 * after sending the probe request. 6545 */ 6546 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 6547 hdr->quiet_threshold = htole16(1); /* min # of packets */ 6548 /* 6549 * Max needs to be greater than active and passive and quiet! 6550 * It's also in microseconds! 6551 */ 6552 hdr->max_svc = htole32(250 * 1024); 6553 6554 /* 6555 * Reset scan: interval=100 6556 * Normal scan: interval=becaon interval 6557 * suspend_time: 100 (TU) 6558 * 6559 */ 6560 extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22; 6561 //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024); 6562 scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */ 6563 hdr->pause_svc = htole32(scan_service_time); 6564 6565 /* Select antennas for scanning. */ 6566 rxchain = 6567 IWN_RXCHAIN_VALID(sc->rxchainmask) | 6568 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 6569 IWN_RXCHAIN_DRIVER_FORCE; 6570 if (IEEE80211_IS_CHAN_A(c) && 6571 sc->hw_type == IWN_HW_REV_TYPE_4965) { 6572 /* Ant A must be avoided in 5GHz because of an HW bug. */ 6573 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); 6574 } else /* Use all available RX antennas. 
*/ 6575 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 6576 hdr->rxchain = htole16(rxchain); 6577 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 6578 6579 tx = (struct iwn_cmd_data *)(hdr + 1); 6580 tx->flags = htole32(IWN_TX_AUTO_SEQ); 6581 tx->id = sc->broadcast_id; 6582 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 6583 6584 if (IEEE80211_IS_CHAN_5GHZ(c)) { 6585 /* Send probe requests at 6Mbps. */ 6586 tx->rate = htole32(0xd); 6587 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 6588 } else { 6589 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 6590 if (sc->hw_type == IWN_HW_REV_TYPE_4965 && 6591 sc->rxon->associd && sc->rxon->chan > 14) 6592 tx->rate = htole32(0xd); 6593 else { 6594 /* Send probe requests at 1Mbps. */ 6595 tx->rate = htole32(10 | IWN_RFLAG_CCK); 6596 } 6597 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 6598 } 6599 /* Use the first valid TX antenna. */ 6600 txant = IWN_LSB(sc->txchainmask); 6601 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 6602 6603 /* 6604 * Only do active scanning if we're announcing a probe request 6605 * for a given SSID (or more, if we ever add it to the driver.) 6606 */ 6607 is_active = 0; 6608 6609 /* 6610 * If we're scanning for a specific SSID, add it to the command. 6611 * 6612 * XXX maybe look at adding support for scanning multiple SSIDs? 6613 */ 6614 essid = (struct iwn_scan_essid *)(tx + 1); 6615 if (ss != NULL) { 6616 if (ss->ss_ssid[0].len != 0) { 6617 essid[0].id = IEEE80211_ELEMID_SSID; 6618 essid[0].len = ss->ss_ssid[0].len; 6619 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 6620 } 6621 6622 DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n", 6623 __func__, 6624 ss->ss_ssid[0].len, 6625 ss->ss_ssid[0].len, 6626 ss->ss_ssid[0].ssid); 6627 6628 if (ss->ss_nssid > 0) 6629 is_active = 1; 6630 } 6631 6632 /* 6633 * Build a probe request frame. Most of the following code is a 6634 * copy & paste of what is done in net80211. 
6635 */ 6636 wh = (struct ieee80211_frame *)(essid + 20); 6637 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 6638 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 6639 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 6640 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 6641 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 6642 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 6643 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 6644 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 6645 6646 frm = (uint8_t *)(wh + 1); 6647 frm = ieee80211_add_ssid(frm, NULL, 0); 6648 frm = ieee80211_add_rates(frm, rs); 6649 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6650 frm = ieee80211_add_xrates(frm, rs); 6651 if (ic->ic_htcaps & IEEE80211_HTC_HT) 6652 frm = ieee80211_add_htcap(frm, ni); 6653 6654 /* Set length of probe request. */ 6655 tx->len = htole16(frm - (uint8_t *)wh); 6656 6657 /* 6658 * If active scanning is requested but a certain channel is 6659 * marked passive, we can do active scanning if we detect 6660 * transmissions. 6661 * 6662 * There is an issue with some firmware versions that triggers 6663 * a sysassert on a "good CRC threshold" of zero (== disabled), 6664 * on a radar channel even though this means that we should NOT 6665 * send probes. 6666 * 6667 * The "good CRC threshold" is the number of frames that we 6668 * need to receive during our dwell time on a channel before 6669 * sending out probes -- setting this to a huge value will 6670 * mean we never reach it, but at the same time work around 6671 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER 6672 * here instead of IWL_GOOD_CRC_TH_DISABLED. 6673 * 6674 * This was fixed in later versions along with some other 6675 * scan changes, and the threshold behaves as a flag in those 6676 * versions. 6677 */ 6678 6679 /* 6680 * If we're doing active scanning, set the crc_threshold 6681 * to a suitable value. 
This is different to active veruss 6682 * passive scanning depending upon the channel flags; the 6683 * firmware will obey that particular check for us. 6684 */ 6685 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 6686 hdr->crc_threshold = is_active ? 6687 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 6688 else 6689 hdr->crc_threshold = is_active ? 6690 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 6691 6692 chan = (struct iwn_scan_chan *)frm; 6693 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 6694 chan->flags = 0; 6695 if (ss->ss_nssid > 0) 6696 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 6697 chan->dsp_gain = 0x6e; 6698 6699 /* 6700 * Set the passive/active flag depending upon the channel mode. 6701 * XXX TODO: take the is_active flag into account as well? 6702 */ 6703 if (c->ic_flags & IEEE80211_CHAN_PASSIVE) 6704 chan->flags |= htole32(IWN_CHAN_PASSIVE); 6705 else 6706 chan->flags |= htole32(IWN_CHAN_ACTIVE); 6707 6708 /* 6709 * Calculate the active/passive dwell times. 
6710 */ 6711 6712 dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid); 6713 dwell_passive = iwn_get_passive_dwell_time(sc, c); 6714 6715 /* Make sure they're valid */ 6716 if (dwell_passive <= dwell_active) 6717 dwell_passive = dwell_active + 1; 6718 6719 chan->active = htole16(dwell_active); 6720 chan->passive = htole16(dwell_passive); 6721 6722 if (IEEE80211_IS_CHAN_5GHZ(c) && 6723 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6724 chan->rf_gain = 0x3b; 6725 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 6726 chan->rf_gain = 0x3b; 6727 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6728 chan->rf_gain = 0x28; 6729 } else { 6730 chan->rf_gain = 0x28; 6731 } 6732 6733 DPRINTF(sc, IWN_DEBUG_STATE, 6734 "%s: chan %u flags 0x%x rf_gain 0x%x " 6735 "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x " 6736 "isactive=%d numssid=%d\n", __func__, 6737 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 6738 dwell_active, dwell_passive, scan_service_time, 6739 hdr->crc_threshold, is_active, ss->ss_nssid); 6740 6741 hdr->nchan++; 6742 chan++; 6743 buflen = (uint8_t *)chan - buf; 6744 hdr->len = htole16(buflen); 6745 6746 if (sc->sc_is_scanning) { 6747 device_printf(sc->sc_dev, 6748 "%s: called with is_scanning set!\n", 6749 __func__); 6750 } 6751 sc->sc_is_scanning = 1; 6752 6753 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 6754 hdr->nchan); 6755 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 6756 free(buf, M_DEVBUF); 6757 6758 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6759 6760 return error; 6761 } 6762 6763 static int 6764 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 6765 { 6766 struct iwn_ops *ops = &sc->ops; 6767 struct ifnet *ifp = sc->sc_ifp; 6768 struct ieee80211com *ic = ifp->if_l2com; 6769 struct ieee80211_node *ni = vap->iv_bss; 6770 int error; 6771 6772 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6773 6774 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6775 /* Update adapter configuration. 
	 */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	/* Select the basic rate masks for the target channel's phy mode. */
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0x15;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
	    sc->rxon->ofdm_mask);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
		    __func__, error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}
	/*
	 * Reconfiguring RXON clears the firmware nodes table so we must
	 * add the broadcast node again.
	 */
	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add broadcast node, error %d\n", __func__,
		    error);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Configure the NIC for the RUN (associated) state: final RXON with the
 * association ID and HT flags, TX power, BSS node, link quality and the
 * periodic calibration timer.
 *
 * NOTE(review): the 11b/g fallback below uses cck_mask 0x0f while
 * iwn_auth() uses 0x03 for the same case -- confirm the difference is
 * intentional.
 */
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_node_info node;
	uint32_t htflags = 0;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Link LED blinks while monitoring. */
		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
		return 0;
	}
	if ((error = iwn_set_timing(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set timing, error %d\n", __func__, error);
		return error;
	}

	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g.
		 */
		sc->rxon->cck_mask = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	/* Translate the current HT protection mode into RXON HT flags. */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			switch (ic->ic_curhtprotmode) {
			case IEEE80211_HTINFO_OPMODE_HT20PR:
				htflags |= IWN_RXON_HT_MODEPURE40;
				break;
			default:
				htflags |= IWN_RXON_HT_MODEMIXED;
				break;
			}
		}
		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
			htflags |= IWN_RXON_HT_HT40MINUS;
	}
	sc->rxon->flags |= htole32(htflags);
	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
	    sc->rxon->chan, sc->rxon->flags);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not update configuration, error %d\n", __func__,
		    error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}

	/* Fake a join to initialize the TX rate. */
	((struct iwn_node *)ni)->id = IWN_ID_BSS;
	iwn_newassoc(ni, 1);

	/* Add BSS node. */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		/* Honor the peer's spatial multiplexing power save mode. */
		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
		case IEEE80211_HTCAP_SMPS_ENA:
			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
			break;
		case IEEE80211_HTCAP_SMPS_DYNAMIC:
			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
			break;
		}
		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
		    IWN_AMDPU_DENSITY(5));	/* 4us */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
			node.htflags |= htole32(IWN_NODE_HT40);
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
	error = ops->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add BSS node, error %d\n", __func__, error);
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
	    __func__, node.id);
	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup link quality for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	if ((error = iwn_init_sensitivity(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n", __func__,
		    error);
		return error;
	}
	/* Start periodic calibration timer. */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	sc->calib_cnt = 0;
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);

	/* Link LED always on while associated. */
	iwn_set_led(sc, IWN_LED_LINK, 0, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.
6978 */ 6979 static int 6980 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 6981 int baparamset, int batimeout, int baseqctl) 6982 { 6983 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 6984 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6985 struct iwn_ops *ops = &sc->ops; 6986 struct iwn_node *wn = (void *)ni; 6987 struct iwn_node_info node; 6988 uint16_t ssn; 6989 uint8_t tid; 6990 int error; 6991 6992 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6993 6994 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 6995 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 6996 6997 memset(&node, 0, sizeof node); 6998 node.id = wn->id; 6999 node.control = IWN_NODE_UPDATE; 7000 node.flags = IWN_FLAG_SET_ADDBA; 7001 node.addba_tid = tid; 7002 node.addba_ssn = htole16(ssn); 7003 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 7004 wn->id, tid, ssn); 7005 error = ops->add_node(sc, &node, 1); 7006 if (error != 0) 7007 return error; 7008 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 7009 #undef MS 7010 } 7011 7012 /* 7013 * This function is called by upper layer on teardown of an HT-immediate 7014 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
7015 */ 7016 static void 7017 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 7018 { 7019 struct ieee80211com *ic = ni->ni_ic; 7020 struct iwn_softc *sc = ic->ic_ifp->if_softc; 7021 struct iwn_ops *ops = &sc->ops; 7022 struct iwn_node *wn = (void *)ni; 7023 struct iwn_node_info node; 7024 uint8_t tid; 7025 7026 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7027 7028 /* XXX: tid as an argument */ 7029 for (tid = 0; tid < WME_NUM_TID; tid++) { 7030 if (&ni->ni_rx_ampdu[tid] == rap) 7031 break; 7032 } 7033 7034 memset(&node, 0, sizeof node); 7035 node.id = wn->id; 7036 node.control = IWN_NODE_UPDATE; 7037 node.flags = IWN_FLAG_SET_DELBA; 7038 node.delba_tid = tid; 7039 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 7040 (void)ops->add_node(sc, &node, 1); 7041 sc->sc_ampdu_rx_stop(ni, rap); 7042 } 7043 7044 static int 7045 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 7046 int dialogtoken, int baparamset, int batimeout) 7047 { 7048 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 7049 int qid; 7050 7051 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7052 7053 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 7054 if (sc->qid2tap[qid] == NULL) 7055 break; 7056 } 7057 if (qid == sc->ntxqs) { 7058 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 7059 __func__); 7060 return 0; 7061 } 7062 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 7063 if (tap->txa_private == NULL) { 7064 device_printf(sc->sc_dev, 7065 "%s: failed to alloc TX aggregation structure\n", __func__); 7066 return 0; 7067 } 7068 sc->qid2tap[qid] = tap; 7069 *(int *)tap->txa_private = qid; 7070 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 7071 batimeout); 7072 } 7073 7074 static int 7075 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 7076 int code, int baparamset, int batimeout) 7077 { 7078 struct iwn_softc *sc = 
ni->ni_ic->ic_ifp->if_softc; 7079 int qid = *(int *)tap->txa_private; 7080 uint8_t tid = tap->txa_tid; 7081 int ret; 7082 7083 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7084 7085 if (code == IEEE80211_STATUS_SUCCESS) { 7086 ni->ni_txseqs[tid] = tap->txa_start & 0xfff; 7087 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); 7088 if (ret != 1) 7089 return ret; 7090 } else { 7091 sc->qid2tap[qid] = NULL; 7092 free(tap->txa_private, M_DEVBUF); 7093 tap->txa_private = NULL; 7094 } 7095 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); 7096 } 7097 7098 /* 7099 * This function is called by upper layer when an ADDBA response is received 7100 * from another STA. 7101 */ 7102 static int 7103 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 7104 uint8_t tid) 7105 { 7106 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; 7107 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 7108 struct iwn_ops *ops = &sc->ops; 7109 struct iwn_node *wn = (void *)ni; 7110 struct iwn_node_info node; 7111 int error, qid; 7112 7113 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7114 7115 /* Enable TX for the specified RA/TID. 
	 */
	wn->disable_tid &= ~(1 << tid);
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DISABLE_TID;
	node.disable_tid = htole16(wn->disable_tid);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return 0;

	/* Program the chip-specific scheduler under the NIC lock. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return 0;
	qid = *(int *)tap->txa_private;
	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
	    __func__, wn->id, tid, tap->txa_start, qid);
	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);

	iwn_set_link_quality(sc, ni);
	return 1;
}

/*
 * Tear down a TX aggregation session and release the firmware queue
 * reserved for it once the queue has drained.
 *
 * NOTE(review): when packets are still queued (or the NIC lock cannot be
 * taken) the function returns early with qid2tap/txa_private still set --
 * confirm a later path completes the release.
 */
static void
iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	uint8_t tid = tap->txa_tid;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	sc->sc_addba_stop(ni, tap);

	if (tap->txa_private == NULL)
		return;

	qid = *(int *)tap->txa_private;
	if (sc->txq[qid].queued != 0)
		return;
	if (iwn_nic_lock(sc) != 0)
		return;
	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);
	sc->qid2tap[qid] = NULL;
	free(tap->txa_private, M_DEVBUF);
	tap->txa_private = NULL;
}

/*
 * Program the 4965 TX scheduler to run queue "qid" in aggregation mode
 * for the given RA/TID, starting at sequence number "ssn".  Caller must
 * hold the NIC lock (see iwn_ampdu_tx_start()).
 */
static void
iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	struct iwn_node *wn = (void *)ni;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
	    IWN_SCHED_WINSZ);
	/* Set scheduler frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
	    iwn_tid2fifo[tid] << 1);
}

/*
 * Undo iwn4965_ampdu_tx_start(): return scheduler queue "qid" to
 * non-aggregation operation.
 */
static void
iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive.
	 */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
}

/*
 * Program the >=5000-series TX scheduler to run queue "qid" in
 * aggregation mode for the given RA/TID, starting at sequence number
 * "ssn".  Caller must hold the NIC lock (see iwn_ampdu_tx_start()).
 */
static void
iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* NOTE(review): declaration after statement (C99) -- style(9)
	 * would place it before the DPRINTF. */
	struct iwn_node *wn = (void *)ni;

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);

	/* Enable aggregation for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size and frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
}

/*
 * Undo iwn5000_ampdu_tx_start(): return scheduler queue "qid" to
 * non-aggregation operation.
 */
static void
iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Disable aggregation for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
}

/*
 * Query calibration tables from the initialization firmware. We do this
 * only once at first boot. Called from a process context.
 */
static int
iwn5000_query_calibration(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;
	int error;

	/* Request every calibration result the firmware can provide. */
	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = htole32(0xffffffff);
	cmd.ucode.once.start = htole32(0xffffffff);
	cmd.ucode.once.send = htole32(0xffffffff);
	cmd.ucode.flags = htole32(0xffffffff);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
	    __func__);
	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
	if (error != 0)
		return error;

	/* Wait at most two seconds for calibration to complete. */
	/* IWN_FLAG_CALIB_DONE is presumably set (and sc woken) by the RX
	 * notification path -- confirm against the interrupt handler. */
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
	return error;
}

/*
 * Send calibration results to the runtime firmware. These results were
 * obtained on first boot from the initialization firmware.
 */
static int
iwn5000_send_calibration(struct iwn_softc *sc)
{
	int idx, error;

	/* Replay each cached calibration result that this chip requires. */
	for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
		if (!(sc->base_params->calib_need & (1<<idx))) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "No need of calib %d\n",
			    idx);
			continue;	/* no need for this calib */
		}
		/* Result was never captured by iwn5000_query_calibration(). */
		if (sc->calibcmd[idx].buf == NULL) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "Need calib idx : %d but no available data\n",
			    idx);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "send calibration result idx=%d len=%d\n", idx,
		    sc->calibcmd[idx].len);
		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
		    sc->calibcmd[idx].len, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not send calibration result, error %d\n",
			    __func__, error);
			return error;
		}
	}
	return 0;
}

/*
 * Configure WiMAX coexistence. The combo-adapter path is compiled out
 * (#if 0), so coexistence is currently always disabled.
 */
static int
iwn5000_send_wimax_coex(struct iwn_softc *sc)
{
	struct iwn5000_wimax_coex wimax;

#if 0
	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Enable WiMAX coexistence for combo adapters. */
		wimax.flags =
		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_STA_TABLE_VALID |
		    IWN_WIMAX_COEX_ENABLE;
		memcpy(wimax.events, iwn6050_wimax_events,
		    sizeof iwn6050_wimax_events);
	} else
#endif
	{
		/* Disable WiMAX coexistence. */
		wimax.flags = 0;
		memset(wimax.events, 0, sizeof wimax.events);
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
}

/*
 * Send the crystal capacitor calibration values read from the EEPROM
 * to the firmware (low byte and byte 2 of the 32-bit EEPROM word).
 */
static int
iwn5000_crystal_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_crystal cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
	    cmd.cap_pin[0], cmd.cap_pin[1]);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * Send the radio temperature sensor offset (v1 command, single offset),
 * falling back to a default when the EEPROM has no value.
 */
static int
iwn5000_temp_offset_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offset cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0)
		cmd.offset = htole16(sc->eeprom_temp);
	else
		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
	    le16toh(cmd.offset));
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * v2 variant of the temperature offset calibration: carries separate
 * low/high offsets plus the burnt voltage reference from the EEPROM.
 */
static int
iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offsetv2 cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0) {
		cmd.offset_low = htole16(sc->eeprom_temp);
		cmd.offset_high = htole16(sc->eeprom_temp_high);
	} else {
		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
	}
	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
	    le16toh(cmd.offset_low),
	    le16toh(cmd.offset_high),
	    le16toh(cmd.burnt_voltage_ref));

	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).
 */
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Disable chain mode for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);

	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
		/* Reset scheduler read pointer and ring write pointer. */
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		/* Set scheduler window size. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
		/* Set scheduler frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16);
	}

	/* Enable interrupts for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	for (qid = 0; qid < 7; qid++) {
		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
	}
	iwn_nic_unlock(sc);
	return 0;
}

/*
 * This function is called after the initialization or runtime firmware
 * notifies us of its readiness (called in a process context).
 */
static int
iwn5000_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Switch to using ICT interrupt mode. */
	iwn5000_ict_reset(sc);

	if ((error = iwn_nic_lock(sc)) != 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Enable chain mode for all queues, except command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
	else
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);

	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
		/* Reset scheduler read pointer and ring write pointer. */
		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
	}

	/* Enable interrupts for all our 20 queues. */
	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
		/* Mark TX rings as active. */
		for (qid = 0; qid < 11; qid++) {
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	} else {
		/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
		for (qid = 0; qid < 7; qid++) {
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	}
	iwn_nic_unlock(sc);

	/* Configure WiMAX coexistence for combo adapters. */
	error = iwn5000_send_wimax_coex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure WiMAX coexistence, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
		/* Perform crystal calibration. */
		error = iwn5000_crystal_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: crystal calibration failed, error %d\n",
			    __func__, error);
			return error;
		}
	}
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
		/* Query calibration from the initialization firmware. */
		if ((error = iwn5000_query_calibration(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not query calibration, error %d\n",
			    __func__, error);
			return error;
		}
		/*
		 * We have the calibration results now, reboot with the
		 * runtime firmware (call ourselves recursively!)
		 */
		iwn_hw_stop(sc);
		error = iwn_hw_init(sc);
	} else {
		/* Send calibration results to runtime firmware. */
		error = iwn5000_send_calibration(sc);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * The firmware boot code is small and is intended to be copied directly into
 * the NIC internal memory (no DMA transfer).
 */
static int
iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
{
	int error, ntries;

	/* BSM transfer counts are in 32-bit words, not bytes. */
	size /= sizeof (uint32_t);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Copy microcode image into NIC memory. */
	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
	    (const uint32_t *)ucode, size);

	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);

	/* Start boot load now. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);

	/* Wait for transfer to complete. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
		    IWN_BSM_WR_CTRL_START))
			break;
		DELAY(10);
	}
	if (ntries == 1000) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		iwn_nic_unlock(sc);
		return ETIMEDOUT;
	}

	/* Enable boot after power up. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);

	iwn_nic_unlock(sc);
	return 0;
}

/*
 * Load the 4965 firmware: stage the init image through the BSM, wait for
 * the "alive" notification, then point the BSM at the runtime image.
 */
static int
iwn4965_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy initialization sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->init.text, fw->init.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find initialization sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
	iwn_nic_unlock(sc);

	/* Load firmware boot code. */
	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		return error;
	}
	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);

	/* Wait at most one second for first alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	/* Retrieve current temperature for initial TX power calibration. */
	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
	sc->temp = iwn4965_get_temperature(sc);

	/* Copy runtime sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->main.text, fw->main.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find runtime sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
	    IWN_FW_UPDATED | fw->main.textsz);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * DMA one firmware section to SRAM address "dst" via the service Flow
 * Handler channel, sleeping until the end-of-transfer interrupt wakes us.
 */
static int
iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
    const uint8_t *section, int size)
{
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, size);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_PAUSE);

	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
	    IWN_LOADDR(dma->paddr));
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
	    IWN_HIADDR(dma->paddr) << 28 | size);
	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
	    IWN_FH_TXBUF_STATUS_TFBD_VALID);

	/* Kick Flow Handler to start DMA transfer. */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);

	iwn_nic_unlock(sc);

	/* Wait at most five seconds for FH DMA transfer to complete. */
	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
}

/*
 * Load the 5000-series firmware (init image on first boot, runtime image
 * thereafter) section by section, then start execution.
 */
static int
iwn5000_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_part *fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Load the initialization firmware on first boot only. */
	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
	    &sc->fw.main : &sc->fw.init;

	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
	    fw->text, fw->textsz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".text", error);
		return error;
	}
	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
	    fw->data, fw->datasz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".data", error);
		return error;
	}

	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);
	return 0;
}

/*
 * Extract text and data sections from a legacy firmware image.
 */
static int
iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
{
	const uint32_t *ptr;
	size_t hdrlen = 24;
	uint32_t rev;

	/* Caller guarantees at least 4 bytes, so reading "rev" is safe. */
	ptr = (const uint32_t *)fw->data;
	rev = le32toh(*ptr++);

	/* Check firmware API version. */
	if (IWN_FW_API(rev) <= 1) {
		device_printf(sc->sc_dev,
		    "%s: bad firmware, need API version >=2\n", __func__);
		return EINVAL;
	}
	if (IWN_FW_API(rev) >= 3) {
		/* Skip build number (version 2 header). */
		hdrlen += 4;
		ptr++;
	}
	/* The size check below covers the five section-size reads. */
	if (fw->size < hdrlen) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	fw->main.textsz = le32toh(*ptr++);
	fw->main.datasz = le32toh(*ptr++);
	fw->init.textsz = le32toh(*ptr++);
	fw->init.datasz = le32toh(*ptr++);
	fw->boot.textsz = le32toh(*ptr++);

	/* Check that all firmware sections fit. */
	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}

	/* Get pointers to firmware sections. */
	fw->main.text = (const uint8_t *)ptr;
	fw->main.data = fw->main.text + fw->main.textsz;
	fw->init.text = fw->main.data + fw->main.datasz;
	fw->init.data = fw->init.text + fw->init.textsz;
	fw->boot.text = fw->init.data + fw->init.datasz;
	return 0;
}

/*
 * Extract text and data sections from a TLV firmware image.
7850 */ 7851 static int 7852 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 7853 uint16_t alt) 7854 { 7855 const struct iwn_fw_tlv_hdr *hdr; 7856 const struct iwn_fw_tlv *tlv; 7857 const uint8_t *ptr, *end; 7858 uint64_t altmask; 7859 uint32_t len, tmp; 7860 7861 if (fw->size < sizeof (*hdr)) { 7862 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7863 __func__, fw->size); 7864 return EINVAL; 7865 } 7866 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 7867 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 7868 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 7869 __func__, le32toh(hdr->signature)); 7870 return EINVAL; 7871 } 7872 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 7873 le32toh(hdr->build)); 7874 7875 /* 7876 * Select the closest supported alternative that is less than 7877 * or equal to the specified one. 7878 */ 7879 altmask = le64toh(hdr->altmask); 7880 while (alt > 0 && !(altmask & (1ULL << alt))) 7881 alt--; /* Downgrade. */ 7882 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 7883 7884 ptr = (const uint8_t *)(hdr + 1); 7885 end = (const uint8_t *)(fw->data + fw->size); 7886 7887 /* Parse type-length-value fields. */ 7888 while (ptr + sizeof (*tlv) <= end) { 7889 tlv = (const struct iwn_fw_tlv *)ptr; 7890 len = le32toh(tlv->len); 7891 7892 ptr += sizeof (*tlv); 7893 if (ptr + len > end) { 7894 device_printf(sc->sc_dev, 7895 "%s: firmware too short: %zu bytes\n", __func__, 7896 fw->size); 7897 return EINVAL; 7898 } 7899 /* Skip other alternatives. 
*/ 7900 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 7901 goto next; 7902 7903 switch (le16toh(tlv->type)) { 7904 case IWN_FW_TLV_MAIN_TEXT: 7905 fw->main.text = ptr; 7906 fw->main.textsz = len; 7907 break; 7908 case IWN_FW_TLV_MAIN_DATA: 7909 fw->main.data = ptr; 7910 fw->main.datasz = len; 7911 break; 7912 case IWN_FW_TLV_INIT_TEXT: 7913 fw->init.text = ptr; 7914 fw->init.textsz = len; 7915 break; 7916 case IWN_FW_TLV_INIT_DATA: 7917 fw->init.data = ptr; 7918 fw->init.datasz = len; 7919 break; 7920 case IWN_FW_TLV_BOOT_TEXT: 7921 fw->boot.text = ptr; 7922 fw->boot.textsz = len; 7923 break; 7924 case IWN_FW_TLV_ENH_SENS: 7925 if (!len) 7926 sc->sc_flags |= IWN_FLAG_ENH_SENS; 7927 break; 7928 case IWN_FW_TLV_PHY_CALIB: 7929 tmp = le32toh(*ptr); 7930 if (tmp < 253) { 7931 sc->reset_noise_gain = tmp; 7932 sc->noise_gain = tmp + 1; 7933 } 7934 break; 7935 case IWN_FW_TLV_PAN: 7936 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; 7937 DPRINTF(sc, IWN_DEBUG_RESET, 7938 "PAN Support found: %d\n", 1); 7939 break; 7940 case IWN_FW_TLV_FLAGS: 7941 if (len < sizeof(uint32_t)) 7942 break; 7943 if (len % sizeof(uint32_t)) 7944 break; 7945 sc->tlv_feature_flags = le32toh(*ptr); 7946 DPRINTF(sc, IWN_DEBUG_RESET, 7947 "%s: feature: 0x%08x\n", 7948 __func__, 7949 sc->tlv_feature_flags); 7950 break; 7951 case IWN_FW_TLV_PBREQ_MAXLEN: 7952 case IWN_FW_TLV_RUNT_EVTLOG_PTR: 7953 case IWN_FW_TLV_RUNT_EVTLOG_SIZE: 7954 case IWN_FW_TLV_RUNT_ERRLOG_PTR: 7955 case IWN_FW_TLV_INIT_EVTLOG_PTR: 7956 case IWN_FW_TLV_INIT_EVTLOG_SIZE: 7957 case IWN_FW_TLV_INIT_ERRLOG_PTR: 7958 case IWN_FW_TLV_WOWLAN_INST: 7959 case IWN_FW_TLV_WOWLAN_DATA: 7960 DPRINTF(sc, IWN_DEBUG_RESET, 7961 "TLV type %d reconized but not handled\n", 7962 le16toh(tlv->type)); 7963 break; 7964 default: 7965 DPRINTF(sc, IWN_DEBUG_RESET, 7966 "TLV type %d not handled\n", le16toh(tlv->type)); 7967 break; 7968 } 7969 next: /* TLV fields are 32-bit aligned. 
*/ 7970 ptr += (len + 3) & ~3; 7971 } 7972 return 0; 7973 } 7974 7975 static int 7976 iwn_read_firmware(struct iwn_softc *sc) 7977 { 7978 struct iwn_fw_info *fw = &sc->fw; 7979 int error; 7980 7981 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7982 7983 IWN_UNLOCK(sc); 7984 7985 memset(fw, 0, sizeof (*fw)); 7986 7987 /* Read firmware image from filesystem. */ 7988 sc->fw_fp = firmware_get(sc->fwname); 7989 if (sc->fw_fp == NULL) { 7990 device_printf(sc->sc_dev, "%s: could not read firmware %s\n", 7991 __func__, sc->fwname); 7992 IWN_LOCK(sc); 7993 return EINVAL; 7994 } 7995 IWN_LOCK(sc); 7996 7997 fw->size = sc->fw_fp->datasize; 7998 fw->data = (const uint8_t *)sc->fw_fp->data; 7999 if (fw->size < sizeof (uint32_t)) { 8000 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 8001 __func__, fw->size); 8002 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8003 sc->fw_fp = NULL; 8004 return EINVAL; 8005 } 8006 8007 /* Retrieve text and data sections. */ 8008 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 8009 error = iwn_read_firmware_leg(sc, fw); 8010 else 8011 error = iwn_read_firmware_tlv(sc, fw, 1); 8012 if (error != 0) { 8013 device_printf(sc->sc_dev, 8014 "%s: could not read firmware sections, error %d\n", 8015 __func__, error); 8016 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8017 sc->fw_fp = NULL; 8018 return error; 8019 } 8020 8021 /* Make sure text and data sections fit in hardware memory. */ 8022 if (fw->main.textsz > sc->fw_text_maxsz || 8023 fw->main.datasz > sc->fw_data_maxsz || 8024 fw->init.textsz > sc->fw_text_maxsz || 8025 fw->init.datasz > sc->fw_data_maxsz || 8026 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 8027 (fw->boot.textsz & 3) != 0) { 8028 device_printf(sc->sc_dev, "%s: firmware sections too large\n", 8029 __func__); 8030 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8031 sc->fw_fp = NULL; 8032 return EINVAL; 8033 } 8034 8035 /* We can proceed with loading the firmware. 
*/ 8036 return 0; 8037 } 8038 8039 static int 8040 iwn_clock_wait(struct iwn_softc *sc) 8041 { 8042 int ntries; 8043 8044 /* Set "initialization complete" bit. */ 8045 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 8046 8047 /* Wait for clock stabilization. */ 8048 for (ntries = 0; ntries < 2500; ntries++) { 8049 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 8050 return 0; 8051 DELAY(10); 8052 } 8053 device_printf(sc->sc_dev, 8054 "%s: timeout waiting for clock stabilization\n", __func__); 8055 return ETIMEDOUT; 8056 } 8057 8058 static int 8059 iwn_apm_init(struct iwn_softc *sc) 8060 { 8061 uint32_t reg; 8062 int error; 8063 8064 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8065 8066 /* Disable L0s exit timer (NMI bug workaround). */ 8067 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 8068 /* Don't wait for ICH L0s (ICH bug workaround). */ 8069 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 8070 8071 /* Set FH wait threshold to max (HW bug under stress workaround). */ 8072 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 8073 8074 /* Enable HAP INTA to move adapter from L1a to L0s. */ 8075 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 8076 8077 /* Retrieve PCIe Active State Power Management (ASPM). */ 8078 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 8079 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 8080 if (reg & 0x02) /* L1 Entry enabled. */ 8081 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 8082 else 8083 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 8084 8085 if (sc->base_params->pll_cfg_val) 8086 IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val); 8087 8088 /* Wait for clock stabilization before accessing prph. 
*/ 8089 if ((error = iwn_clock_wait(sc)) != 0) 8090 return error; 8091 8092 if ((error = iwn_nic_lock(sc)) != 0) 8093 return error; 8094 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 8095 /* Enable DMA and BSM (Bootstrap State Machine). */ 8096 iwn_prph_write(sc, IWN_APMG_CLK_EN, 8097 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 8098 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 8099 } else { 8100 /* Enable DMA. */ 8101 iwn_prph_write(sc, IWN_APMG_CLK_EN, 8102 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 8103 } 8104 DELAY(20); 8105 /* Disable L1-Active. */ 8106 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 8107 iwn_nic_unlock(sc); 8108 8109 return 0; 8110 } 8111 8112 static void 8113 iwn_apm_stop_master(struct iwn_softc *sc) 8114 { 8115 int ntries; 8116 8117 /* Stop busmaster DMA activity. */ 8118 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 8119 for (ntries = 0; ntries < 100; ntries++) { 8120 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 8121 return; 8122 DELAY(10); 8123 } 8124 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); 8125 } 8126 8127 static void 8128 iwn_apm_stop(struct iwn_softc *sc) 8129 { 8130 iwn_apm_stop_master(sc); 8131 8132 /* Reset the entire device. */ 8133 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 8134 DELAY(10); 8135 /* Clear "initialization complete" bit. */ 8136 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 8137 } 8138 8139 static int 8140 iwn4965_nic_config(struct iwn_softc *sc) 8141 { 8142 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8143 8144 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 8145 /* 8146 * I don't believe this to be correct but this is what the 8147 * vendor driver is doing. Probably the bits should not be 8148 * shifted in IWN_RFCFG_*. 
8149 */ 8150 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8151 IWN_RFCFG_TYPE(sc->rfcfg) | 8152 IWN_RFCFG_STEP(sc->rfcfg) | 8153 IWN_RFCFG_DASH(sc->rfcfg)); 8154 } 8155 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8156 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 8157 return 0; 8158 } 8159 8160 static int 8161 iwn5000_nic_config(struct iwn_softc *sc) 8162 { 8163 uint32_t tmp; 8164 int error; 8165 8166 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8167 8168 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 8169 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8170 IWN_RFCFG_TYPE(sc->rfcfg) | 8171 IWN_RFCFG_STEP(sc->rfcfg) | 8172 IWN_RFCFG_DASH(sc->rfcfg)); 8173 } 8174 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8175 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 8176 8177 if ((error = iwn_nic_lock(sc)) != 0) 8178 return error; 8179 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 8180 8181 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 8182 /* 8183 * Select first Switching Voltage Regulator (1.32V) to 8184 * solve a stability issue related to noisy DC2DC line 8185 * in the silicon of 1000 Series. 8186 */ 8187 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 8188 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 8189 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 8190 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 8191 } 8192 iwn_nic_unlock(sc); 8193 8194 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 8195 /* Use internal power amplifier only. */ 8196 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 8197 } 8198 if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) { 8199 /* Indicate that ROM calibration version is >=6. */ 8200 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 8201 } 8202 if (sc->base_params->additional_gp_drv_bit) 8203 IWN_SETBITS(sc, IWN_GP_DRIVER, 8204 sc->base_params->additional_gp_drv_bit); 8205 return 0; 8206 } 8207 8208 /* 8209 * Take NIC ownership over Intel Active Management Technology (AMT). 
 */
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/*
 * Full hardware bring-up: power on, configure RX/TX rings and DMA
 * channels, load the firmware, then run the chip-specific post-alive
 * sequence once the firmware signals it is ready.
 */
static int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA           |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits. */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing. */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->base_params->shadow_reg_enable)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}
	/* Do post-firmware initialization. */

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ops->post_alive(sc);
}

/*
 * Tear down the hardware: mask interrupts, stop the scheduler and DMA
 * channels, drain the rings, and power the adapter off.
 */
static void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels. */
	if (iwn_nic_lock(sc) == 0) {
		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
			/* Bounded wait for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
				    IWN_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		iwn_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwn_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwn_nic_lock(sc) == 0) {
		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
		iwn_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter. */
	iwn_apm_stop(sc);
}

/*
 * Taskqueue handler: RF kill switch released; restart the interface.
 */
static void
iwn_radio_on(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (vap != NULL) {
		iwn_init(sc);
		ieee80211_init(vap);
	}
}

/*
 * Taskqueue handler: RF kill switch engaged; stop the interface.
 */
static void
iwn_radio_off(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	iwn_stop(sc);
	if (vap != NULL)
		ieee80211_stop(vap);

	/* Enable interrupts to get RF toggle notification.
*/ 8445 IWN_LOCK(sc); 8446 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8447 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8448 IWN_UNLOCK(sc); 8449 } 8450 8451 static void 8452 iwn_panicked(void *arg0, int pending) 8453 { 8454 struct iwn_softc *sc = arg0; 8455 struct ifnet *ifp = sc->sc_ifp; 8456 struct ieee80211com *ic = ifp->if_l2com; 8457 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8458 int error; 8459 8460 if (vap == NULL) { 8461 printf("%s: null vap\n", __func__); 8462 return; 8463 } 8464 8465 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; " 8466 "resetting...\n", __func__, vap->iv_state); 8467 8468 IWN_LOCK(sc); 8469 8470 iwn_stop_locked(sc); 8471 iwn_init_locked(sc); 8472 if (vap->iv_state >= IEEE80211_S_AUTH && 8473 (error = iwn_auth(sc, vap)) != 0) { 8474 device_printf(sc->sc_dev, 8475 "%s: could not move to auth state\n", __func__); 8476 } 8477 if (vap->iv_state >= IEEE80211_S_RUN && 8478 (error = iwn_run(sc, vap)) != 0) { 8479 device_printf(sc->sc_dev, 8480 "%s: could not move to run state\n", __func__); 8481 } 8482 8483 /* Only run start once the NIC is in a useful state, like associated */ 8484 iwn_start_locked(sc->sc_ifp); 8485 8486 IWN_UNLOCK(sc); 8487 } 8488 8489 static void 8490 iwn_init_locked(struct iwn_softc *sc) 8491 { 8492 struct ifnet *ifp = sc->sc_ifp; 8493 int error; 8494 8495 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8496 8497 IWN_LOCK_ASSERT(sc); 8498 8499 if ((error = iwn_hw_prepare(sc)) != 0) { 8500 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 8501 __func__, error); 8502 goto fail; 8503 } 8504 8505 /* Initialize interrupt mask to default value. */ 8506 sc->int_mask = IWN_INT_MASK_DEF; 8507 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8508 8509 /* Check that the radio is not disabled by hardware switch. 
*/ 8510 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 8511 device_printf(sc->sc_dev, 8512 "radio is disabled by hardware switch\n"); 8513 /* Enable interrupts to get RF toggle notifications. */ 8514 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8515 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8516 return; 8517 } 8518 8519 /* Read firmware images from the filesystem. */ 8520 if ((error = iwn_read_firmware(sc)) != 0) { 8521 device_printf(sc->sc_dev, 8522 "%s: could not read firmware, error %d\n", __func__, 8523 error); 8524 goto fail; 8525 } 8526 8527 /* Initialize hardware and upload firmware. */ 8528 error = iwn_hw_init(sc); 8529 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8530 sc->fw_fp = NULL; 8531 if (error != 0) { 8532 device_printf(sc->sc_dev, 8533 "%s: could not initialize hardware, error %d\n", __func__, 8534 error); 8535 goto fail; 8536 } 8537 8538 /* Configure adapter now that it is ready. */ 8539 if ((error = iwn_config(sc)) != 0) { 8540 device_printf(sc->sc_dev, 8541 "%s: could not configure device, error %d\n", __func__, 8542 error); 8543 goto fail; 8544 } 8545 8546 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 8547 ifp->if_drv_flags |= IFF_DRV_RUNNING; 8548 8549 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 8550 8551 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8552 8553 return; 8554 8555 fail: iwn_stop_locked(sc); 8556 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 8557 } 8558 8559 static void 8560 iwn_init(void *arg) 8561 { 8562 struct iwn_softc *sc = arg; 8563 struct ifnet *ifp = sc->sc_ifp; 8564 struct ieee80211com *ic = ifp->if_l2com; 8565 8566 IWN_LOCK(sc); 8567 iwn_init_locked(sc); 8568 IWN_UNLOCK(sc); 8569 8570 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 8571 ieee80211_start_all(ic); 8572 } 8573 8574 static void 8575 iwn_stop_locked(struct iwn_softc *sc) 8576 { 8577 struct ifnet *ifp = sc->sc_ifp; 8578 8579 IWN_LOCK_ASSERT(sc); 8580 8581 sc->sc_is_scanning = 0; 8582 sc->sc_tx_timer = 0; 8583 
callout_stop(&sc->watchdog_to); 8584 callout_stop(&sc->calib_to); 8585 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 8586 8587 /* Power OFF hardware. */ 8588 iwn_hw_stop(sc); 8589 } 8590 8591 static void 8592 iwn_stop(struct iwn_softc *sc) 8593 { 8594 IWN_LOCK(sc); 8595 iwn_stop_locked(sc); 8596 IWN_UNLOCK(sc); 8597 } 8598 8599 /* 8600 * Callback from net80211 to start a scan. 8601 */ 8602 static void 8603 iwn_scan_start(struct ieee80211com *ic) 8604 { 8605 struct ifnet *ifp = ic->ic_ifp; 8606 struct iwn_softc *sc = ifp->if_softc; 8607 8608 IWN_LOCK(sc); 8609 /* make the link LED blink while we're scanning */ 8610 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 8611 IWN_UNLOCK(sc); 8612 } 8613 8614 /* 8615 * Callback from net80211 to terminate a scan. 8616 */ 8617 static void 8618 iwn_scan_end(struct ieee80211com *ic) 8619 { 8620 struct ifnet *ifp = ic->ic_ifp; 8621 struct iwn_softc *sc = ifp->if_softc; 8622 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8623 8624 IWN_LOCK(sc); 8625 if (vap->iv_state == IEEE80211_S_RUN) { 8626 /* Set link LED to ON status if we are associated */ 8627 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 8628 } 8629 IWN_UNLOCK(sc); 8630 } 8631 8632 /* 8633 * Callback from net80211 to force a channel change. 8634 */ 8635 static void 8636 iwn_set_channel(struct ieee80211com *ic) 8637 { 8638 const struct ieee80211_channel *c = ic->ic_curchan; 8639 struct ifnet *ifp = ic->ic_ifp; 8640 struct iwn_softc *sc = ifp->if_softc; 8641 int error; 8642 8643 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8644 8645 IWN_LOCK(sc); 8646 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 8647 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 8648 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 8649 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 8650 8651 /* 8652 * Only need to set the channel in Monitor mode. AP scanning and auth 8653 * are already taken care of by their respective firmware commands. 
8654 */ 8655 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 8656 error = iwn_config(sc); 8657 if (error != 0) 8658 device_printf(sc->sc_dev, 8659 "%s: error %d settting channel\n", __func__, error); 8660 } 8661 IWN_UNLOCK(sc); 8662 } 8663 8664 /* 8665 * Callback from net80211 to start scanning of the current channel. 8666 */ 8667 static void 8668 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 8669 { 8670 struct ieee80211vap *vap = ss->ss_vap; 8671 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 8672 struct ieee80211com *ic = vap->iv_ic; 8673 int error; 8674 8675 IWN_LOCK(sc); 8676 error = iwn_scan(sc, vap, ss, ic->ic_curchan); 8677 IWN_UNLOCK(sc); 8678 if (error != 0) 8679 ieee80211_cancel_scan(vap); 8680 } 8681 8682 /* 8683 * Callback from net80211 to handle the minimum dwell time being met. 8684 * The intent is to terminate the scan but we just let the firmware 8685 * notify us when it's finished as we have no safe way to abort it. 8686 */ 8687 static void 8688 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 8689 { 8690 /* NB: don't try to abort scan; wait for firmware to finish */ 8691 } 8692 8693 static void 8694 iwn_hw_reset(void *arg0, int pending) 8695 { 8696 struct iwn_softc *sc = arg0; 8697 struct ifnet *ifp = sc->sc_ifp; 8698 struct ieee80211com *ic = ifp->if_l2com; 8699 8700 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8701 8702 iwn_stop(sc); 8703 iwn_init(sc); 8704 ieee80211_notify_radio(ic, 1); 8705 } 8706 #ifdef IWN_DEBUG 8707 #define IWN_DESC(x) case x: return #x 8708 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 8709 8710 /* 8711 * Translate CSR code to string 8712 */ 8713 static char *iwn_get_csr_string(int csr) 8714 { 8715 switch (csr) { 8716 IWN_DESC(IWN_HW_IF_CONFIG); 8717 IWN_DESC(IWN_INT_COALESCING); 8718 IWN_DESC(IWN_INT); 8719 IWN_DESC(IWN_INT_MASK); 8720 IWN_DESC(IWN_FH_INT); 8721 IWN_DESC(IWN_GPIO_IN); 8722 IWN_DESC(IWN_RESET); 8723 IWN_DESC(IWN_GP_CNTRL); 8724 IWN_DESC(IWN_HW_REV); 
8725 IWN_DESC(IWN_EEPROM); 8726 IWN_DESC(IWN_EEPROM_GP); 8727 IWN_DESC(IWN_OTP_GP); 8728 IWN_DESC(IWN_GIO); 8729 IWN_DESC(IWN_GP_UCODE); 8730 IWN_DESC(IWN_GP_DRIVER); 8731 IWN_DESC(IWN_UCODE_GP1); 8732 IWN_DESC(IWN_UCODE_GP2); 8733 IWN_DESC(IWN_LED); 8734 IWN_DESC(IWN_DRAM_INT_TBL); 8735 IWN_DESC(IWN_GIO_CHICKEN); 8736 IWN_DESC(IWN_ANA_PLL); 8737 IWN_DESC(IWN_HW_REV_WA); 8738 IWN_DESC(IWN_DBG_HPET_MEM); 8739 default: 8740 return "UNKNOWN CSR"; 8741 } 8742 } 8743 8744 /* 8745 * This function print firmware register 8746 */ 8747 static void 8748 iwn_debug_register(struct iwn_softc *sc) 8749 { 8750 int i; 8751 static const uint32_t csr_tbl[] = { 8752 IWN_HW_IF_CONFIG, 8753 IWN_INT_COALESCING, 8754 IWN_INT, 8755 IWN_INT_MASK, 8756 IWN_FH_INT, 8757 IWN_GPIO_IN, 8758 IWN_RESET, 8759 IWN_GP_CNTRL, 8760 IWN_HW_REV, 8761 IWN_EEPROM, 8762 IWN_EEPROM_GP, 8763 IWN_OTP_GP, 8764 IWN_GIO, 8765 IWN_GP_UCODE, 8766 IWN_GP_DRIVER, 8767 IWN_UCODE_GP1, 8768 IWN_UCODE_GP2, 8769 IWN_LED, 8770 IWN_DRAM_INT_TBL, 8771 IWN_GIO_CHICKEN, 8772 IWN_ANA_PLL, 8773 IWN_HW_REV_WA, 8774 IWN_DBG_HPET_MEM, 8775 }; 8776 DPRINTF(sc, IWN_DEBUG_REGISTER, 8777 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 8778 "\n"); 8779 for (i = 0; i < COUNTOF(csr_tbl); i++){ 8780 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 8781 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 8782 if ((i+1) % 3 == 0) 8783 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8784 } 8785 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8786 } 8787 #endif 8788