1 /*- 2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr> 3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org> 4 * Copyright (c) 2008 Sam Leffler, Errno Consulting 5 * Copyright (c) 2011 Intel Corporation 6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr> 7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org> 8 * 9 * Permission to use, copy, modify, and distribute this software for any 10 * purpose with or without fee is hereby granted, provided that the above 11 * copyright notice and this permission notice appear in all copies. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 24 * adapters. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwn.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>
#include <dev/iwn/if_iwn_devid.h>
#include <dev/iwn/if_iwn_chip_cfg.h>
#include <dev/iwn/if_iwn_debug.h>
#include <dev/iwn/if_iwn_ioctl.h>

/*
 * One entry of the PCI match table: a PCI vendor/device ID pair and the
 * marketing name that iwn_probe() reports via device_set_desc().
 */
struct iwn_ident {
	uint16_t	vendor;		/* PCI vendor ID (0x8086 = Intel) */
	uint16_t	device;		/* PCI device ID */
	const char	*name;		/* human-readable adapter name */
};

/*
 * Table of supported adapters, scanned linearly by iwn_probe().
 * Terminated by an all-zero sentinel entry (name == NULL).  Subdevice
 * IDs are NOT matched here; per-subdevice setup is deferred to
 * iwn_config_specific().
 */
static const struct iwn_ident iwn_ident_table[] = {
	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
	{ 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
	/* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" },
	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" },
	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" },
	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" },
	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" },
	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" },
	{ 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105" },
	{ 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105" },
	{ 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" },
	{ 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" },
	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" },
	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" },
	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" },
	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" },
	{ 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" },
	{ 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" },
	{ 0, 0, NULL }
};

/* Forward declarations for the driver's static functions. */
static int	iwn_probe(device_t);
static int	iwn_attach(device_t);
static int	iwn4965_attach(struct iwn_softc *, uint16_t);
static int	iwn5000_attach(struct iwn_softc *, uint16_t);
static int	iwn_config_specific(struct iwn_softc *, uint16_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static void	iwn_sysctlattach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_detach(device_t);
static int	iwn_shutdown(device_t);
static int	iwn_suspend(device_t);
static int	iwn_resume(device_t);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct
iwn_softc *); 166 static void iwn_free_ict(struct iwn_softc *); 167 static int iwn_alloc_fwmem(struct iwn_softc *); 168 static void iwn_free_fwmem(struct iwn_softc *); 169 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 170 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 171 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 172 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 173 int); 174 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 175 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 176 static void iwn5000_ict_reset(struct iwn_softc *); 177 static int iwn_read_eeprom(struct iwn_softc *, 178 uint8_t macaddr[IEEE80211_ADDR_LEN]); 179 static void iwn4965_read_eeprom(struct iwn_softc *); 180 #ifdef IWN_DEBUG 181 static void iwn4965_print_power_group(struct iwn_softc *, int); 182 #endif 183 static void iwn5000_read_eeprom(struct iwn_softc *); 184 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 185 static void iwn_read_eeprom_band(struct iwn_softc *, int); 186 static void iwn_read_eeprom_ht40(struct iwn_softc *, int); 187 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 188 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 189 struct ieee80211_channel *); 190 static int iwn_setregdomain(struct ieee80211com *, 191 struct ieee80211_regdomain *, int, 192 struct ieee80211_channel[]); 193 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 194 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 195 const uint8_t mac[IEEE80211_ADDR_LEN]); 196 static void iwn_newassoc(struct ieee80211_node *, int); 197 static int iwn_media_change(struct ifnet *); 198 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 199 static void iwn_calib_timeout(void *); 200 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 201 struct 
iwn_rx_data *); 202 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 203 struct iwn_rx_data *); 204 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 205 struct iwn_rx_data *); 206 static void iwn5000_rx_calib_results(struct iwn_softc *, 207 struct iwn_rx_desc *, struct iwn_rx_data *); 208 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 209 struct iwn_rx_data *); 210 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 211 struct iwn_rx_data *); 212 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 213 struct iwn_rx_data *); 214 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 215 uint8_t); 216 static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *); 217 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 218 static void iwn_notif_intr(struct iwn_softc *); 219 static void iwn_wakeup_intr(struct iwn_softc *); 220 static void iwn_rftoggle_intr(struct iwn_softc *); 221 static void iwn_fatal_intr(struct iwn_softc *); 222 static void iwn_intr(void *); 223 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 224 uint16_t); 225 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 226 uint16_t); 227 #ifdef notyet 228 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 229 #endif 230 static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 231 struct ieee80211_node *); 232 static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, 233 struct ieee80211_node *, 234 const struct ieee80211_bpf_params *params); 235 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 236 const struct ieee80211_bpf_params *); 237 static void iwn_start(struct ifnet *); 238 static void iwn_start_locked(struct ifnet *); 239 static void iwn_watchdog(void *); 240 static int iwn_ioctl(struct ifnet *, u_long, caddr_t); 241 static int iwn_cmd(struct iwn_softc *, int, const void *, int, 
int); 242 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 243 int); 244 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 245 int); 246 static int iwn_set_link_quality(struct iwn_softc *, 247 struct ieee80211_node *); 248 static int iwn_add_broadcast_node(struct iwn_softc *, int); 249 static int iwn_updateedca(struct ieee80211com *); 250 static void iwn_update_mcast(struct ifnet *); 251 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 252 static int iwn_set_critical_temp(struct iwn_softc *); 253 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 254 static void iwn4965_power_calibration(struct iwn_softc *, int); 255 static int iwn4965_set_txpower(struct iwn_softc *, 256 struct ieee80211_channel *, int); 257 static int iwn5000_set_txpower(struct iwn_softc *, 258 struct ieee80211_channel *, int); 259 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 260 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 261 static int iwn_get_noise(const struct iwn_rx_general_stats *); 262 static int iwn4965_get_temperature(struct iwn_softc *); 263 static int iwn5000_get_temperature(struct iwn_softc *); 264 static int iwn_init_sensitivity(struct iwn_softc *); 265 static void iwn_collect_noise(struct iwn_softc *, 266 const struct iwn_rx_general_stats *); 267 static int iwn4965_init_gains(struct iwn_softc *); 268 static int iwn5000_init_gains(struct iwn_softc *); 269 static int iwn4965_set_gains(struct iwn_softc *); 270 static int iwn5000_set_gains(struct iwn_softc *); 271 static void iwn_tune_sensitivity(struct iwn_softc *, 272 const struct iwn_rx_stats *); 273 static void iwn_save_stats_counters(struct iwn_softc *, 274 const struct iwn_stats *); 275 static int iwn_send_sensitivity(struct iwn_softc *); 276 static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *); 277 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 278 static 
int iwn_send_btcoex(struct iwn_softc *); 279 static int iwn_send_advanced_btcoex(struct iwn_softc *); 280 static int iwn5000_runtime_calib(struct iwn_softc *); 281 static int iwn_config(struct iwn_softc *); 282 static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int); 283 static int iwn_scan(struct iwn_softc *, struct ieee80211vap *, 284 struct ieee80211_scan_state *, struct ieee80211_channel *); 285 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 286 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 287 static int iwn_ampdu_rx_start(struct ieee80211_node *, 288 struct ieee80211_rx_ampdu *, int, int, int); 289 static void iwn_ampdu_rx_stop(struct ieee80211_node *, 290 struct ieee80211_rx_ampdu *); 291 static int iwn_addba_request(struct ieee80211_node *, 292 struct ieee80211_tx_ampdu *, int, int, int); 293 static int iwn_addba_response(struct ieee80211_node *, 294 struct ieee80211_tx_ampdu *, int, int, int); 295 static int iwn_ampdu_tx_start(struct ieee80211com *, 296 struct ieee80211_node *, uint8_t); 297 static void iwn_ampdu_tx_stop(struct ieee80211_node *, 298 struct ieee80211_tx_ampdu *); 299 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 300 struct ieee80211_node *, int, uint8_t, uint16_t); 301 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, 302 uint8_t, uint16_t); 303 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 304 struct ieee80211_node *, int, uint8_t, uint16_t); 305 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, 306 uint8_t, uint16_t); 307 static int iwn5000_query_calibration(struct iwn_softc *); 308 static int iwn5000_send_calibration(struct iwn_softc *); 309 static int iwn5000_send_wimax_coex(struct iwn_softc *); 310 static int iwn5000_crystal_calib(struct iwn_softc *); 311 static int iwn5000_temp_offset_calib(struct iwn_softc *); 312 static int iwn5000_temp_offset_calibv2(struct iwn_softc *); 313 static int iwn4965_post_alive(struct iwn_softc *); 314 static int 
iwn5000_post_alive(struct iwn_softc *); 315 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 316 int); 317 static int iwn4965_load_firmware(struct iwn_softc *); 318 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 319 const uint8_t *, int); 320 static int iwn5000_load_firmware(struct iwn_softc *); 321 static int iwn_read_firmware_leg(struct iwn_softc *, 322 struct iwn_fw_info *); 323 static int iwn_read_firmware_tlv(struct iwn_softc *, 324 struct iwn_fw_info *, uint16_t); 325 static int iwn_read_firmware(struct iwn_softc *); 326 static int iwn_clock_wait(struct iwn_softc *); 327 static int iwn_apm_init(struct iwn_softc *); 328 static void iwn_apm_stop_master(struct iwn_softc *); 329 static void iwn_apm_stop(struct iwn_softc *); 330 static int iwn4965_nic_config(struct iwn_softc *); 331 static int iwn5000_nic_config(struct iwn_softc *); 332 static int iwn_hw_prepare(struct iwn_softc *); 333 static int iwn_hw_init(struct iwn_softc *); 334 static void iwn_hw_stop(struct iwn_softc *); 335 static void iwn_radio_on(void *, int); 336 static void iwn_radio_off(void *, int); 337 static void iwn_panicked(void *, int); 338 static void iwn_init_locked(struct iwn_softc *); 339 static void iwn_init(void *); 340 static void iwn_stop_locked(struct iwn_softc *); 341 static void iwn_stop(struct iwn_softc *); 342 static void iwn_scan_start(struct ieee80211com *); 343 static void iwn_scan_end(struct ieee80211com *); 344 static void iwn_set_channel(struct ieee80211com *); 345 static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 346 static void iwn_scan_mindwell(struct ieee80211_scan_state *); 347 static void iwn_hw_reset(void *, int); 348 #ifdef IWN_DEBUG 349 static char *iwn_get_csr_string(int); 350 static void iwn_debug_register(struct iwn_softc *); 351 #endif 352 353 static device_method_t iwn_methods[] = { 354 /* Device interface */ 355 DEVMETHOD(device_probe, iwn_probe), 356 DEVMETHOD(device_attach, iwn_attach), 357 
DEVMETHOD(device_detach, iwn_detach), 358 DEVMETHOD(device_shutdown, iwn_shutdown), 359 DEVMETHOD(device_suspend, iwn_suspend), 360 DEVMETHOD(device_resume, iwn_resume), 361 362 DEVMETHOD_END 363 }; 364 365 static driver_t iwn_driver = { 366 "iwn", 367 iwn_methods, 368 sizeof(struct iwn_softc) 369 }; 370 static devclass_t iwn_devclass; 371 372 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); 373 374 MODULE_VERSION(iwn, 1); 375 376 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 377 MODULE_DEPEND(iwn, pci, 1, 1, 1); 378 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 379 380 static int 381 iwn_probe(device_t dev) 382 { 383 const struct iwn_ident *ident; 384 385 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 386 if (pci_get_vendor(dev) == ident->vendor && 387 pci_get_device(dev) == ident->device) { 388 device_set_desc(dev, ident->name); 389 return (BUS_PROBE_DEFAULT); 390 } 391 } 392 return ENXIO; 393 } 394 395 static int 396 iwn_attach(device_t dev) 397 { 398 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 399 struct ieee80211com *ic; 400 struct ifnet *ifp; 401 int i, error, rid; 402 uint8_t macaddr[IEEE80211_ADDR_LEN]; 403 404 sc->sc_dev = dev; 405 406 #ifdef IWN_DEBUG 407 error = resource_int_value(device_get_name(sc->sc_dev), 408 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 409 if (error != 0) 410 sc->sc_debug = 0; 411 #else 412 sc->sc_debug = 0; 413 #endif 414 415 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); 416 417 /* 418 * Get the offset of the PCI Express Capability Structure in PCI 419 * Configuration Space. 420 */ 421 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 422 if (error != 0) { 423 device_printf(dev, "PCIe capability structure not found!\n"); 424 return error; 425 } 426 427 /* Clear device-specific "PCI retry timeout" register (41h). */ 428 pci_write_config(dev, 0x41, 0, 1); 429 430 /* Enable bus-mastering. 
*/ 431 pci_enable_busmaster(dev); 432 433 rid = PCIR_BAR(0); 434 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 435 RF_ACTIVE); 436 if (sc->mem == NULL) { 437 device_printf(dev, "can't map mem space\n"); 438 error = ENOMEM; 439 return error; 440 } 441 sc->sc_st = rman_get_bustag(sc->mem); 442 sc->sc_sh = rman_get_bushandle(sc->mem); 443 444 i = 1; 445 rid = 0; 446 if (pci_alloc_msi(dev, &i) == 0) 447 rid = 1; 448 /* Install interrupt handler. */ 449 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 450 (rid != 0 ? 0 : RF_SHAREABLE)); 451 if (sc->irq == NULL) { 452 device_printf(dev, "can't map interrupt\n"); 453 error = ENOMEM; 454 goto fail; 455 } 456 457 IWN_LOCK_INIT(sc); 458 459 /* Read hardware revision and attach. */ 460 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) 461 & IWN_HW_REV_TYPE_MASK; 462 sc->subdevice_id = pci_get_subdevice(dev); 463 464 /* 465 * 4965 versus 5000 and later have different methods. 466 * Let's set those up first. 467 */ 468 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 469 error = iwn4965_attach(sc, pci_get_device(dev)); 470 else 471 error = iwn5000_attach(sc, pci_get_device(dev)); 472 if (error != 0) { 473 device_printf(dev, "could not attach device, error %d\n", 474 error); 475 goto fail; 476 } 477 478 /* 479 * Next, let's setup the various parameters of each NIC. 480 */ 481 error = iwn_config_specific(sc, pci_get_device(dev)); 482 if (error != 0) { 483 device_printf(dev, "could not attach device, error %d\n", 484 error); 485 goto fail; 486 } 487 488 if ((error = iwn_hw_prepare(sc)) != 0) { 489 device_printf(dev, "hardware not ready, error %d\n", error); 490 goto fail; 491 } 492 493 /* Allocate DMA memory for firmware transfers. */ 494 if ((error = iwn_alloc_fwmem(sc)) != 0) { 495 device_printf(dev, 496 "could not allocate memory for firmware, error %d\n", 497 error); 498 goto fail; 499 } 500 501 /* Allocate "Keep Warm" page. 
*/ 502 if ((error = iwn_alloc_kw(sc)) != 0) { 503 device_printf(dev, 504 "could not allocate keep warm page, error %d\n", error); 505 goto fail; 506 } 507 508 /* Allocate ICT table for 5000 Series. */ 509 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 510 (error = iwn_alloc_ict(sc)) != 0) { 511 device_printf(dev, "could not allocate ICT table, error %d\n", 512 error); 513 goto fail; 514 } 515 516 /* Allocate TX scheduler "rings". */ 517 if ((error = iwn_alloc_sched(sc)) != 0) { 518 device_printf(dev, 519 "could not allocate TX scheduler rings, error %d\n", error); 520 goto fail; 521 } 522 523 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 524 for (i = 0; i < sc->ntxqs; i++) { 525 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 526 device_printf(dev, 527 "could not allocate TX ring %d, error %d\n", i, 528 error); 529 goto fail; 530 } 531 } 532 533 /* Allocate RX ring. */ 534 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 535 device_printf(dev, "could not allocate RX ring, error %d\n", 536 error); 537 goto fail; 538 } 539 540 /* Clear pending interrupts. */ 541 IWN_WRITE(sc, IWN_INT, 0xffffffff); 542 543 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 544 if (ifp == NULL) { 545 device_printf(dev, "can not allocate ifnet structure\n"); 546 goto fail; 547 } 548 549 ic = ifp->if_l2com; 550 ic->ic_ifp = ifp; 551 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 552 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 553 554 /* Set device capabilities. 
*/ 555 ic->ic_caps = 556 IEEE80211_C_STA /* station mode supported */ 557 | IEEE80211_C_MONITOR /* monitor mode supported */ 558 | IEEE80211_C_BGSCAN /* background scanning */ 559 | IEEE80211_C_TXPMGT /* tx power management */ 560 | IEEE80211_C_SHSLOT /* short slot time supported */ 561 | IEEE80211_C_WPA 562 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 563 #if 0 564 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 565 #endif 566 | IEEE80211_C_WME /* WME */ 567 | IEEE80211_C_PMGT /* Station-side power mgmt */ 568 ; 569 570 /* Read MAC address, channels, etc from EEPROM. */ 571 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 572 device_printf(dev, "could not read EEPROM, error %d\n", 573 error); 574 goto fail; 575 } 576 577 /* Count the number of available chains. */ 578 sc->ntxchains = 579 ((sc->txchainmask >> 2) & 1) + 580 ((sc->txchainmask >> 1) & 1) + 581 ((sc->txchainmask >> 0) & 1); 582 sc->nrxchains = 583 ((sc->rxchainmask >> 2) & 1) + 584 ((sc->rxchainmask >> 1) & 1) + 585 ((sc->rxchainmask >> 0) & 1); 586 if (bootverbose) { 587 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", 588 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 589 macaddr, ":"); 590 } 591 592 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 593 ic->ic_rxstream = sc->nrxchains; 594 ic->ic_txstream = sc->ntxchains; 595 596 /* 597 * The NICs we currently support cap out at 2x2 support 598 * separate from the chains being used. 599 * 600 * This is a total hack to work around that until some 601 * per-device method is implemented to return the 602 * actual stream support. 603 * 604 * XXX Note: the 5350 is a 3x3 device; so we shouldn't 605 * cap this! But, anything that touches rates in the 606 * driver needs to be audited first before 3x3 is enabled. 
607 */ 608 if (ic->ic_rxstream > 2) 609 ic->ic_rxstream = 2; 610 if (ic->ic_txstream > 2) 611 ic->ic_txstream = 2; 612 613 ic->ic_htcaps = 614 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 615 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 616 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 617 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 618 #ifdef notyet 619 | IEEE80211_HTCAP_GREENFIELD 620 #if IWN_RBUF_SIZE == 8192 621 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 622 #else 623 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 624 #endif 625 #endif 626 /* s/w capabilities */ 627 | IEEE80211_HTC_HT /* HT operation */ 628 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 629 #ifdef notyet 630 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 631 #endif 632 ; 633 } 634 635 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 636 ifp->if_softc = sc; 637 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 638 ifp->if_init = iwn_init; 639 ifp->if_ioctl = iwn_ioctl; 640 ifp->if_start = iwn_start; 641 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 642 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 643 IFQ_SET_READY(&ifp->if_snd); 644 645 ieee80211_ifattach(ic, macaddr); 646 ic->ic_vap_create = iwn_vap_create; 647 ic->ic_vap_delete = iwn_vap_delete; 648 ic->ic_raw_xmit = iwn_raw_xmit; 649 ic->ic_node_alloc = iwn_node_alloc; 650 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 651 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 652 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 653 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 654 sc->sc_addba_request = ic->ic_addba_request; 655 ic->ic_addba_request = iwn_addba_request; 656 sc->sc_addba_response = ic->ic_addba_response; 657 ic->ic_addba_response = iwn_addba_response; 658 sc->sc_addba_stop = ic->ic_addba_stop; 659 ic->ic_addba_stop = iwn_ampdu_tx_stop; 660 ic->ic_newassoc = iwn_newassoc; 661 ic->ic_wme.wme_update = iwn_updateedca; 662 ic->ic_update_mcast = iwn_update_mcast; 663 ic->ic_scan_start = iwn_scan_start; 664 
ic->ic_scan_end = iwn_scan_end; 665 ic->ic_set_channel = iwn_set_channel; 666 ic->ic_scan_curchan = iwn_scan_curchan; 667 ic->ic_scan_mindwell = iwn_scan_mindwell; 668 ic->ic_setregdomain = iwn_setregdomain; 669 670 iwn_radiotap_attach(sc); 671 672 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); 673 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); 674 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc); 675 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); 676 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); 677 TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc); 678 679 sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK, 680 taskqueue_thread_enqueue, &sc->sc_tq); 681 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq"); 682 if (error != 0) { 683 device_printf(dev, "can't start threads, error %d\n", error); 684 goto fail; 685 } 686 687 iwn_sysctlattach(sc); 688 689 /* 690 * Hook our interrupt after all initialization is complete. 691 */ 692 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 693 NULL, iwn_intr, sc, &sc->sc_ih); 694 if (error != 0) { 695 device_printf(dev, "can't establish interrupt, error %d\n", 696 error); 697 goto fail; 698 } 699 700 #if 0 701 device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n", 702 __func__, 703 sizeof(struct iwn_stats), 704 sizeof(struct iwn_stats_bt)); 705 #endif 706 707 if (bootverbose) 708 ieee80211_announce(ic); 709 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 710 return 0; 711 fail: 712 iwn_detach(dev); 713 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 714 return error; 715 } 716 717 /* 718 * Define specific configuration based on device id and subdevice id 719 * pid : PCI device id 720 */ 721 static int 722 iwn_config_specific(struct iwn_softc *sc, uint16_t pid) 723 { 724 725 switch (pid) { 726 /* 4965 series */ 727 case IWN_DID_4965_1: 728 case IWN_DID_4965_2: 729 case IWN_DID_4965_3: 730 case IWN_DID_4965_4: 731 sc->base_params = 
&iwn4965_base_params; 732 sc->limits = &iwn4965_sensitivity_limits; 733 sc->fwname = "iwn4965fw"; 734 /* Override chains masks, ROM is known to be broken. */ 735 sc->txchainmask = IWN_ANT_AB; 736 sc->rxchainmask = IWN_ANT_ABC; 737 /* Enable normal btcoex */ 738 sc->sc_flags |= IWN_FLAG_BTCOEX; 739 break; 740 /* 1000 Series */ 741 case IWN_DID_1000_1: 742 case IWN_DID_1000_2: 743 switch(sc->subdevice_id) { 744 case IWN_SDID_1000_1: 745 case IWN_SDID_1000_2: 746 case IWN_SDID_1000_3: 747 case IWN_SDID_1000_4: 748 case IWN_SDID_1000_5: 749 case IWN_SDID_1000_6: 750 case IWN_SDID_1000_7: 751 case IWN_SDID_1000_8: 752 case IWN_SDID_1000_9: 753 case IWN_SDID_1000_10: 754 case IWN_SDID_1000_11: 755 case IWN_SDID_1000_12: 756 sc->limits = &iwn1000_sensitivity_limits; 757 sc->base_params = &iwn1000_base_params; 758 sc->fwname = "iwn1000fw"; 759 break; 760 default: 761 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 762 "0x%04x rev %d not supported (subdevice)\n", pid, 763 sc->subdevice_id,sc->hw_type); 764 return ENOTSUP; 765 } 766 break; 767 /* 6x00 Series */ 768 case IWN_DID_6x00_2: 769 case IWN_DID_6x00_4: 770 case IWN_DID_6x00_1: 771 case IWN_DID_6x00_3: 772 sc->fwname = "iwn6000fw"; 773 sc->limits = &iwn6000_sensitivity_limits; 774 switch(sc->subdevice_id) { 775 case IWN_SDID_6x00_1: 776 case IWN_SDID_6x00_2: 777 case IWN_SDID_6x00_8: 778 //iwl6000_3agn_cfg 779 sc->base_params = &iwn_6000_base_params; 780 break; 781 case IWN_SDID_6x00_3: 782 case IWN_SDID_6x00_6: 783 case IWN_SDID_6x00_9: 784 ////iwl6000i_2agn 785 case IWN_SDID_6x00_4: 786 case IWN_SDID_6x00_7: 787 case IWN_SDID_6x00_10: 788 //iwl6000i_2abg_cfg 789 case IWN_SDID_6x00_5: 790 //iwl6000i_2bg_cfg 791 sc->base_params = &iwn_6000i_base_params; 792 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 793 sc->txchainmask = IWN_ANT_BC; 794 sc->rxchainmask = IWN_ANT_BC; 795 break; 796 default: 797 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 798 "0x%04x rev %d not supported (subdevice)\n", pid, 
799 sc->subdevice_id,sc->hw_type); 800 return ENOTSUP; 801 } 802 break; 803 /* 6x05 Series */ 804 case IWN_DID_6x05_1: 805 case IWN_DID_6x05_2: 806 switch(sc->subdevice_id) { 807 case IWN_SDID_6x05_1: 808 case IWN_SDID_6x05_4: 809 case IWN_SDID_6x05_6: 810 //iwl6005_2agn_cfg 811 case IWN_SDID_6x05_2: 812 case IWN_SDID_6x05_5: 813 case IWN_SDID_6x05_7: 814 //iwl6005_2abg_cfg 815 case IWN_SDID_6x05_3: 816 //iwl6005_2bg_cfg 817 case IWN_SDID_6x05_8: 818 case IWN_SDID_6x05_9: 819 //iwl6005_2agn_sff_cfg 820 case IWN_SDID_6x05_10: 821 //iwl6005_2agn_d_cfg 822 case IWN_SDID_6x05_11: 823 //iwl6005_2agn_mow1_cfg 824 case IWN_SDID_6x05_12: 825 //iwl6005_2agn_mow2_cfg 826 sc->fwname = "iwn6000g2afw"; 827 sc->limits = &iwn6000_sensitivity_limits; 828 sc->base_params = &iwn_6000g2_base_params; 829 break; 830 default: 831 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 832 "0x%04x rev %d not supported (subdevice)\n", pid, 833 sc->subdevice_id,sc->hw_type); 834 return ENOTSUP; 835 } 836 break; 837 /* 6x35 Series */ 838 case IWN_DID_6035_1: 839 case IWN_DID_6035_2: 840 switch(sc->subdevice_id) { 841 case IWN_SDID_6035_1: 842 case IWN_SDID_6035_2: 843 case IWN_SDID_6035_3: 844 case IWN_SDID_6035_4: 845 sc->fwname = "iwn6000g2bfw"; 846 sc->limits = &iwn6235_sensitivity_limits; 847 sc->base_params = &iwn_6235_base_params; 848 break; 849 default: 850 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 851 "0x%04x rev %d not supported (subdevice)\n", pid, 852 sc->subdevice_id,sc->hw_type); 853 return ENOTSUP; 854 } 855 break; 856 /* 6x50 WiFi/WiMax Series */ 857 case IWN_DID_6050_1: 858 case IWN_DID_6050_2: 859 switch(sc->subdevice_id) { 860 case IWN_SDID_6050_1: 861 case IWN_SDID_6050_3: 862 case IWN_SDID_6050_5: 863 //iwl6050_2agn_cfg 864 case IWN_SDID_6050_2: 865 case IWN_SDID_6050_4: 866 case IWN_SDID_6050_6: 867 //iwl6050_2abg_cfg 868 sc->fwname = "iwn6050fw"; 869 sc->txchainmask = IWN_ANT_AB; 870 sc->rxchainmask = IWN_ANT_AB; 871 sc->limits = 
&iwn6000_sensitivity_limits; 872 sc->base_params = &iwn_6050_base_params; 873 break; 874 default: 875 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 876 "0x%04x rev %d not supported (subdevice)\n", pid, 877 sc->subdevice_id,sc->hw_type); 878 return ENOTSUP; 879 } 880 break; 881 /* 6150 WiFi/WiMax Series */ 882 case IWN_DID_6150_1: 883 case IWN_DID_6150_2: 884 switch(sc->subdevice_id) { 885 case IWN_SDID_6150_1: 886 case IWN_SDID_6150_3: 887 case IWN_SDID_6150_5: 888 // iwl6150_bgn_cfg 889 case IWN_SDID_6150_2: 890 case IWN_SDID_6150_4: 891 case IWN_SDID_6150_6: 892 //iwl6150_bg_cfg 893 sc->fwname = "iwn6050fw"; 894 sc->limits = &iwn6000_sensitivity_limits; 895 sc->base_params = &iwn_6150_base_params; 896 break; 897 default: 898 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 899 "0x%04x rev %d not supported (subdevice)\n", pid, 900 sc->subdevice_id,sc->hw_type); 901 return ENOTSUP; 902 } 903 break; 904 /* 6030 Series and 1030 Series */ 905 case IWN_DID_x030_1: 906 case IWN_DID_x030_2: 907 case IWN_DID_x030_3: 908 case IWN_DID_x030_4: 909 switch(sc->subdevice_id) { 910 case IWN_SDID_x030_1: 911 case IWN_SDID_x030_3: 912 case IWN_SDID_x030_5: 913 // iwl1030_bgn_cfg 914 case IWN_SDID_x030_2: 915 case IWN_SDID_x030_4: 916 case IWN_SDID_x030_6: 917 //iwl1030_bg_cfg 918 case IWN_SDID_x030_7: 919 case IWN_SDID_x030_10: 920 case IWN_SDID_x030_14: 921 //iwl6030_2agn_cfg 922 case IWN_SDID_x030_8: 923 case IWN_SDID_x030_11: 924 case IWN_SDID_x030_15: 925 // iwl6030_2bgn_cfg 926 case IWN_SDID_x030_9: 927 case IWN_SDID_x030_12: 928 case IWN_SDID_x030_16: 929 // iwl6030_2abg_cfg 930 case IWN_SDID_x030_13: 931 //iwl6030_2bg_cfg 932 sc->fwname = "iwn6000g2bfw"; 933 sc->limits = &iwn6000_sensitivity_limits; 934 sc->base_params = &iwn_6000g2b_base_params; 935 break; 936 default: 937 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 938 "0x%04x rev %d not supported (subdevice)\n", pid, 939 sc->subdevice_id,sc->hw_type); 940 return ENOTSUP; 941 
} 942 break; 943 /* 130 Series WiFi */ 944 /* XXX: This series will need adjustment for rate. 945 * see rx_with_siso_diversity in linux kernel 946 */ 947 case IWN_DID_130_1: 948 case IWN_DID_130_2: 949 switch(sc->subdevice_id) { 950 case IWN_SDID_130_1: 951 case IWN_SDID_130_3: 952 case IWN_SDID_130_5: 953 //iwl130_bgn_cfg 954 case IWN_SDID_130_2: 955 case IWN_SDID_130_4: 956 case IWN_SDID_130_6: 957 //iwl130_bg_cfg 958 sc->fwname = "iwn6000g2bfw"; 959 sc->limits = &iwn6000_sensitivity_limits; 960 sc->base_params = &iwn_6000g2b_base_params; 961 break; 962 default: 963 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 964 "0x%04x rev %d not supported (subdevice)\n", pid, 965 sc->subdevice_id,sc->hw_type); 966 return ENOTSUP; 967 } 968 break; 969 /* 100 Series WiFi */ 970 case IWN_DID_100_1: 971 case IWN_DID_100_2: 972 switch(sc->subdevice_id) { 973 case IWN_SDID_100_1: 974 case IWN_SDID_100_2: 975 case IWN_SDID_100_3: 976 case IWN_SDID_100_4: 977 case IWN_SDID_100_5: 978 case IWN_SDID_100_6: 979 sc->limits = &iwn1000_sensitivity_limits; 980 sc->base_params = &iwn1000_base_params; 981 sc->fwname = "iwn100fw"; 982 break; 983 default: 984 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 985 "0x%04x rev %d not supported (subdevice)\n", pid, 986 sc->subdevice_id,sc->hw_type); 987 return ENOTSUP; 988 } 989 break; 990 991 /* 105 Series */ 992 /* XXX: This series will need adjustment for rate. 
993 * see rx_with_siso_diversity in linux kernel 994 */ 995 case IWN_DID_105_1: 996 case IWN_DID_105_2: 997 switch(sc->subdevice_id) { 998 case IWN_SDID_105_1: 999 case IWN_SDID_105_2: 1000 case IWN_SDID_105_3: 1001 //iwl105_bgn_cfg 1002 case IWN_SDID_105_4: 1003 //iwl105_bgn_d_cfg 1004 sc->limits = &iwn2030_sensitivity_limits; 1005 sc->base_params = &iwn2000_base_params; 1006 sc->fwname = "iwn105fw"; 1007 break; 1008 default: 1009 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1010 "0x%04x rev %d not supported (subdevice)\n", pid, 1011 sc->subdevice_id,sc->hw_type); 1012 return ENOTSUP; 1013 } 1014 break; 1015 1016 /* 135 Series */ 1017 /* XXX: This series will need adjustment for rate. 1018 * see rx_with_siso_diversity in linux kernel 1019 */ 1020 case IWN_DID_135_1: 1021 case IWN_DID_135_2: 1022 switch(sc->subdevice_id) { 1023 case IWN_SDID_135_1: 1024 case IWN_SDID_135_2: 1025 case IWN_SDID_135_3: 1026 sc->limits = &iwn2030_sensitivity_limits; 1027 sc->base_params = &iwn2030_base_params; 1028 sc->fwname = "iwn135fw"; 1029 break; 1030 default: 1031 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1032 "0x%04x rev %d not supported (subdevice)\n", pid, 1033 sc->subdevice_id,sc->hw_type); 1034 return ENOTSUP; 1035 } 1036 break; 1037 1038 /* 2x00 Series */ 1039 case IWN_DID_2x00_1: 1040 case IWN_DID_2x00_2: 1041 switch(sc->subdevice_id) { 1042 case IWN_SDID_2x00_1: 1043 case IWN_SDID_2x00_2: 1044 case IWN_SDID_2x00_3: 1045 //iwl2000_2bgn_cfg 1046 case IWN_SDID_2x00_4: 1047 //iwl2000_2bgn_d_cfg 1048 sc->limits = &iwn2030_sensitivity_limits; 1049 sc->base_params = &iwn2000_base_params; 1050 sc->fwname = "iwn2000fw"; 1051 break; 1052 default: 1053 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1054 "0x%04x rev %d not supported (subdevice) \n", 1055 pid, sc->subdevice_id, sc->hw_type); 1056 return ENOTSUP; 1057 } 1058 break; 1059 /* 2x30 Series */ 1060 case IWN_DID_2x30_1: 1061 case IWN_DID_2x30_2: 1062 switch(sc->subdevice_id) 
{ 1063 case IWN_SDID_2x30_1: 1064 case IWN_SDID_2x30_3: 1065 case IWN_SDID_2x30_5: 1066 //iwl100_bgn_cfg 1067 case IWN_SDID_2x30_2: 1068 case IWN_SDID_2x30_4: 1069 case IWN_SDID_2x30_6: 1070 //iwl100_bg_cfg 1071 sc->limits = &iwn2030_sensitivity_limits; 1072 sc->base_params = &iwn2030_base_params; 1073 sc->fwname = "iwn2030fw"; 1074 break; 1075 default: 1076 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1077 "0x%04x rev %d not supported (subdevice)\n", pid, 1078 sc->subdevice_id,sc->hw_type); 1079 return ENOTSUP; 1080 } 1081 break; 1082 /* 5x00 Series */ 1083 case IWN_DID_5x00_1: 1084 case IWN_DID_5x00_2: 1085 case IWN_DID_5x00_3: 1086 case IWN_DID_5x00_4: 1087 sc->limits = &iwn5000_sensitivity_limits; 1088 sc->base_params = &iwn5000_base_params; 1089 sc->fwname = "iwn5000fw"; 1090 switch(sc->subdevice_id) { 1091 case IWN_SDID_5x00_1: 1092 case IWN_SDID_5x00_2: 1093 case IWN_SDID_5x00_3: 1094 case IWN_SDID_5x00_4: 1095 case IWN_SDID_5x00_9: 1096 case IWN_SDID_5x00_10: 1097 case IWN_SDID_5x00_11: 1098 case IWN_SDID_5x00_12: 1099 case IWN_SDID_5x00_17: 1100 case IWN_SDID_5x00_18: 1101 case IWN_SDID_5x00_19: 1102 case IWN_SDID_5x00_20: 1103 //iwl5100_agn_cfg 1104 sc->txchainmask = IWN_ANT_B; 1105 sc->rxchainmask = IWN_ANT_AB; 1106 break; 1107 case IWN_SDID_5x00_5: 1108 case IWN_SDID_5x00_6: 1109 case IWN_SDID_5x00_13: 1110 case IWN_SDID_5x00_14: 1111 case IWN_SDID_5x00_21: 1112 case IWN_SDID_5x00_22: 1113 //iwl5100_bgn_cfg 1114 sc->txchainmask = IWN_ANT_B; 1115 sc->rxchainmask = IWN_ANT_AB; 1116 break; 1117 case IWN_SDID_5x00_7: 1118 case IWN_SDID_5x00_8: 1119 case IWN_SDID_5x00_15: 1120 case IWN_SDID_5x00_16: 1121 case IWN_SDID_5x00_23: 1122 case IWN_SDID_5x00_24: 1123 //iwl5100_abg_cfg 1124 sc->txchainmask = IWN_ANT_B; 1125 sc->rxchainmask = IWN_ANT_AB; 1126 break; 1127 case IWN_SDID_5x00_25: 1128 case IWN_SDID_5x00_26: 1129 case IWN_SDID_5x00_27: 1130 case IWN_SDID_5x00_28: 1131 case IWN_SDID_5x00_29: 1132 case IWN_SDID_5x00_30: 1133 case 
IWN_SDID_5x00_31: 1134 case IWN_SDID_5x00_32: 1135 case IWN_SDID_5x00_33: 1136 case IWN_SDID_5x00_34: 1137 case IWN_SDID_5x00_35: 1138 case IWN_SDID_5x00_36: 1139 //iwl5300_agn_cfg 1140 sc->txchainmask = IWN_ANT_ABC; 1141 sc->rxchainmask = IWN_ANT_ABC; 1142 break; 1143 default: 1144 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1145 "0x%04x rev %d not supported (subdevice)\n", pid, 1146 sc->subdevice_id,sc->hw_type); 1147 return ENOTSUP; 1148 } 1149 break; 1150 /* 5x50 Series */ 1151 case IWN_DID_5x50_1: 1152 case IWN_DID_5x50_2: 1153 case IWN_DID_5x50_3: 1154 case IWN_DID_5x50_4: 1155 sc->limits = &iwn5000_sensitivity_limits; 1156 sc->base_params = &iwn5000_base_params; 1157 sc->fwname = "iwn5000fw"; 1158 switch(sc->subdevice_id) { 1159 case IWN_SDID_5x50_1: 1160 case IWN_SDID_5x50_2: 1161 case IWN_SDID_5x50_3: 1162 //iwl5350_agn_cfg 1163 sc->limits = &iwn5000_sensitivity_limits; 1164 sc->base_params = &iwn5000_base_params; 1165 sc->fwname = "iwn5000fw"; 1166 break; 1167 case IWN_SDID_5x50_4: 1168 case IWN_SDID_5x50_5: 1169 case IWN_SDID_5x50_8: 1170 case IWN_SDID_5x50_9: 1171 case IWN_SDID_5x50_10: 1172 case IWN_SDID_5x50_11: 1173 //iwl5150_agn_cfg 1174 case IWN_SDID_5x50_6: 1175 case IWN_SDID_5x50_7: 1176 case IWN_SDID_5x50_12: 1177 case IWN_SDID_5x50_13: 1178 //iwl5150_abg_cfg 1179 sc->limits = &iwn5000_sensitivity_limits; 1180 sc->fwname = "iwn5150fw"; 1181 sc->base_params = &iwn_5x50_base_params; 1182 break; 1183 default: 1184 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1185 "0x%04x rev %d not supported (subdevice)\n", pid, 1186 sc->subdevice_id,sc->hw_type); 1187 return ENOTSUP; 1188 } 1189 break; 1190 default: 1191 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" 1192 "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, 1193 sc->hw_type); 1194 return ENOTSUP; 1195 } 1196 return 0; 1197 } 1198 1199 static int 1200 iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 1201 { 1202 struct iwn_ops *ops 
/*
 * Finish attach for 4965 adapters: install the 4965-specific firmware,
 * EEPROM, scheduler and TX callbacks plus hardware constants into the
 * softc.  Always returns 0.
 *
 * NOTE(review): "pid" is not referenced here; presumably kept so the
 * signature matches iwn5000_attach — confirm against the caller.
 */
static int
iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* 4965-specific operation callbacks. */
	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
	/* 4965 hardware layout constants. */
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwn4965fw";
	/* Override chains masks, ROM is known to be broken. */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;
	/* Enable normal btcoex */
	sc->sc_flags |= IWN_FLAG_BTCOEX;

	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);

	return 0;
}

/*
 * Finish attach for 5000-and-later adapters: install the 5000-series
 * callbacks and hardware constants.  Unlike iwn4965_attach() this does
 * not set sc->limits/sc->fwname — those are chosen per device ID by the
 * config-specific code above.  Always returns 0; "pid" is unused here.
 */
static int
iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* 5000-series operation callbacks. */
	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
	/* 5000-series hardware layout constants. */
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
	/* PHY calibration command indices. */
	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;

	return 0;
}
/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
	/* Register the TX and RX radiotap headers kept in the softc. */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWN_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWN_RX_RADIOTAP_PRESENT);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Create the per-device sysctl tree.  Only the "debug" knob exists, and
 * only when the driver is built with IWN_DEBUG.
 */
static void
iwn_sysctlattach(struct iwn_softc *sc)
{
#ifdef IWN_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
	    "control debugging printfs");
#endif
}

/*
 * net80211 vap_create method.  Allocates and wires up the single
 * supported vap: sets up the state machine override, registers the vap
 * in sc->ivap[] and completes the net80211 attach.
 *
 * Returns the new vap, or NULL if a vap already exists or allocation
 * fails (M_NOWAIT).
 */
static struct ieee80211vap *
iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwn_vap *ivp;
	struct ieee80211vap *vap;
	uint8_t mac1[IEEE80211_ADDR_LEN];
	struct iwn_softc *sc = ic->ic_ifp->if_softc;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;

	/* Work on a local copy of the caller's MAC address. */
	IEEE80211_ADDR_COPY(mac1, mac);

	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (ivp == NULL)
		return NULL;
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
	ivp->ctx = IWN_RXON_BSS_CTX;
	IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods, saving net80211's newstate. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwn_newstate;
	sc->ivap[IWN_RXON_BSS_CTX] = vap;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
	ic->ic_opmode = opmode;
	return vap;
}
/*
 * net80211 vap_delete method: undo iwn_vap_create() in reverse order.
 */
static void
iwn_vap_delete(struct ieee80211vap *vap)
{
	struct iwn_vap *ivp = IWN_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

/*
 * Device detach.  Tears down in dependency order: drain net80211 tasks,
 * stop the hardware, drain/free the driver taskqueue and callouts and
 * detach from net80211; then release the interrupt, DMA resources,
 * register window and ifnet.
 *
 * The ifp NULL checks make this safe to call from a partially failed
 * attach.  Always returns 0.
 */
static int
iwn_detach(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (ifp != NULL) {
		ic = ifp->if_l2com;

		/* Make sure no task runs while we tear down. */
		ieee80211_draintask(ic, &sc->sc_reinit_task);
		ieee80211_draintask(ic, &sc->sc_radioon_task);
		ieee80211_draintask(ic, &sc->sc_radiooff_task);

		iwn_stop(sc);

		taskqueue_drain_all(sc->sc_tq);
		taskqueue_free(sc->sc_tq);

		callout_drain(&sc->watchdog_to);
		callout_drain(&sc->calib_to);
		ieee80211_ifdetach(ic);
	}

	/* Uninstall interrupt handler. */
	if (sc->irq != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
		    sc->irq);
		pci_release_msi(dev);
	}

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem), sc->mem);

	if (ifp != NULL)
		if_free(ifp);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
	IWN_LOCK_DESTROY(sc);
	return 0;
}
/*
 * Shutdown method: just quiesce the hardware.  Always returns 0.
 */
static int
iwn_shutdown(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	iwn_stop(sc);
	return 0;
}

/*
 * Suspend method: hand off to net80211, which brings all vaps down.
 */
static int
iwn_suspend(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	ieee80211_suspend_all(ic);
	return 0;
}

/*
 * Resume method: restore PCI state and let net80211 bring vaps back up.
 */
static int
iwn_resume(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	ieee80211_resume_all(ic);
	return 0;
}

/*
 * Acquire exclusive host access to the NIC (required before prph/mem
 * register accesses).  Polls up to 1000 x 10us (~10ms) for the MAC to
 * come out of sleep and grant access.
 *
 * Returns 0 on success, ETIMEDOUT otherwise.  Paired with
 * iwn_nic_unlock().
 */
static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		/* Access is granted only when ENA is set and SLEEP clear. */
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}
*/ 1454 for (ntries = 0; ntries < 1000; ntries++) { 1455 if ((IWN_READ(sc, IWN_GP_CNTRL) & 1456 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 1457 IWN_GP_CNTRL_MAC_ACCESS_ENA) 1458 return 0; 1459 DELAY(10); 1460 } 1461 return ETIMEDOUT; 1462 } 1463 1464 static __inline void 1465 iwn_nic_unlock(struct iwn_softc *sc) 1466 { 1467 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1468 } 1469 1470 static __inline uint32_t 1471 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 1472 { 1473 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 1474 IWN_BARRIER_READ_WRITE(sc); 1475 return IWN_READ(sc, IWN_PRPH_RDATA); 1476 } 1477 1478 static __inline void 1479 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1480 { 1481 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 1482 IWN_BARRIER_WRITE(sc); 1483 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 1484 } 1485 1486 static __inline void 1487 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1488 { 1489 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 1490 } 1491 1492 static __inline void 1493 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1494 { 1495 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 1496 } 1497 1498 static __inline void 1499 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 1500 const uint32_t *data, int count) 1501 { 1502 for (; count > 0; count--, data++, addr += 4) 1503 iwn_prph_write(sc, addr, *data); 1504 } 1505 1506 static __inline uint32_t 1507 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 1508 { 1509 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 1510 IWN_BARRIER_READ_WRITE(sc); 1511 return IWN_READ(sc, IWN_MEM_RDATA); 1512 } 1513 1514 static __inline void 1515 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1516 { 1517 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 1518 IWN_BARRIER_WRITE(sc); 1519 IWN_WRITE(sc, IWN_MEM_WDATA, data); 1520 } 1521 1522 static __inline void 1523 iwn_mem_write_2(struct 
/* Read "count" consecutive 32-bit words of NIC memory into "data". */
static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

/* Fill "count" consecutive 32-bit words of NIC memory with "val". */
static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

/*
 * Acquire the host/firmware EEPROM semaphore.  Retries the request up
 * to 100 times, polling up to 100 x 10us per attempt.
 *
 * Returns 0 on success, ETIMEDOUT otherwise.  Paired with
 * iwn_eeprom_unlock().
 */
static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
	return ETIMEDOUT;
}

/* Release the EEPROM semaphore taken by iwn_eeprom_lock(). */
static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	/* Pulse the power-management reset request bit. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->base_params->shadow_ram_support) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (! sc->base_params->shadow_ram_support) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = prev = 0;
		/*
		 * Walk the OTP linked list: word 0 of each block holds
		 * the (little-endian) address of the next block.
		 */
		for (count = 0; count < sc->base_params->max_ll_items;
		    count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		/* Empty list, or list longer than the HW allows: bad OTP. */
		if (count == 0 || count == sc->base_params->max_ll_items)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}
/*
 * Read "count" bytes of EEPROM/OTPROM content starting at 16-bit word
 * offset "addr" (relative to sc->prom_base) into "data".  Each hardware
 * read returns one 16-bit word in the upper half of the IWN_EEPROM
 * register; an odd trailing byte is handled by the final "count > 1"
 * check.  For OTPROM, uncorrectable ECC errors fail the read and
 * correctable ones are acknowledged by clearing the status bit.
 *
 * Returns 0 on success, ETIMEDOUT if a word never becomes valid, or
 * EIO on an uncorrectable OTPROM ECC error.
 */
static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		/* Word address goes in bits 2..; poll for valid data. */
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			device_printf(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				device_printf(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit. */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		/* Data word lives in bits 16-31 of the register. */
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}
/*
 * busdma load callback: record the physical address of the (single)
 * segment into the bus_addr_t pointed to by "arg".
 */
static void
iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a physically contiguous, 32-bit addressable, zeroed DMA
 * buffer of "size" bytes with the given "alignment".  On success the
 * tag/map/vaddr/paddr are stored in "dma", and the kernel virtual
 * address is also returned through "kvap" if non-NULL.
 *
 * On failure, any partially acquired resources are released via
 * iwn_dma_contig_free() and the busdma error is returned.
 */
static int
iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

	/* Single segment, below 4GB, with the caller's alignment. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* Physical address lands in dma->paddr via the callback. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

/*
 * Release a buffer obtained from iwn_dma_contig_alloc(); safe to call
 * on a partially initialized iwn_dma_info (NULL checks throughout).
 */
static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
bus_dma_tag_destroy(dma->tag); 1749 dma->tag = NULL; 1750 } 1751 } 1752 1753 static int 1754 iwn_alloc_sched(struct iwn_softc *sc) 1755 { 1756 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1757 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, 1758 sc->schedsz, 1024); 1759 } 1760 1761 static void 1762 iwn_free_sched(struct iwn_softc *sc) 1763 { 1764 iwn_dma_contig_free(&sc->sched_dma); 1765 } 1766 1767 static int 1768 iwn_alloc_kw(struct iwn_softc *sc) 1769 { 1770 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1771 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); 1772 } 1773 1774 static void 1775 iwn_free_kw(struct iwn_softc *sc) 1776 { 1777 iwn_dma_contig_free(&sc->kw_dma); 1778 } 1779 1780 static int 1781 iwn_alloc_ict(struct iwn_softc *sc) 1782 { 1783 /* ICT table must be aligned on a 4KB boundary. */ 1784 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, 1785 IWN_ICT_SIZE, 4096); 1786 } 1787 1788 static void 1789 iwn_free_ict(struct iwn_softc *sc) 1790 { 1791 iwn_dma_contig_free(&sc->ict_dma); 1792 } 1793 1794 static int 1795 iwn_alloc_fwmem(struct iwn_softc *sc) 1796 { 1797 /* Must be aligned on a 16-byte boundary. */ 1798 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); 1799 } 1800 1801 static void 1802 iwn_free_fwmem(struct iwn_softc *sc) 1803 { 1804 iwn_dma_contig_free(&sc->fw_dma); 1805 } 1806 1807 static int 1808 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1809 { 1810 bus_size_t size; 1811 int i, error; 1812 1813 ring->cur = 0; 1814 1815 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1816 1817 /* Allocate RX descriptors (256-byte aligned). 
*/ 1818 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1819 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1820 size, 256); 1821 if (error != 0) { 1822 device_printf(sc->sc_dev, 1823 "%s: could not allocate RX ring DMA memory, error %d\n", 1824 __func__, error); 1825 goto fail; 1826 } 1827 1828 /* Allocate RX status area (16-byte aligned). */ 1829 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1830 sizeof (struct iwn_rx_status), 16); 1831 if (error != 0) { 1832 device_printf(sc->sc_dev, 1833 "%s: could not allocate RX status DMA memory, error %d\n", 1834 __func__, error); 1835 goto fail; 1836 } 1837 1838 /* Create RX buffer DMA tag. */ 1839 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1840 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1841 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL, 1842 &ring->data_dmat); 1843 if (error != 0) { 1844 device_printf(sc->sc_dev, 1845 "%s: could not create RX buf DMA tag, error %d\n", 1846 __func__, error); 1847 goto fail; 1848 } 1849 1850 /* 1851 * Allocate and map RX buffers. 
/*
 * Stop RX DMA and reset the ring's software state.  Polls up to
 * 1000 x 10us for the RX FIFO to drain; proceeds regardless so reset
 * always completes.
 */
static void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (iwn_nic_lock(sc) == 0) {
		/* Disable RX DMA and wait for the engine to go idle. */
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

/*
 * Release everything acquired by iwn_alloc_rx_ring(): descriptor and
 * status DMA areas, per-slot mbufs and maps, and the buffer DMA tag.
 * Safe on a partially constructed ring (NULL checks throughout).
 */
static void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
/*
 * Allocate TX ring "qid": descriptor array, command buffer array, a
 * DMA tag for payload mbufs, and one map per slot.  Each slot's
 * cmd_paddr/scratch_paddr point into the contiguous command area
 * (scratch lives 12 bytes into each iwn_tx_cmd).
 *
 * Returns 0 on success; on failure everything is released through
 * iwn_free_tx_ring() and the error is returned.
 */
static int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate TX command blocks (4-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
	    size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX cmd DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Payload tag: up to IWN_MAX_SCATTER - 1 segments per frame. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create TX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create TX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
	return error;
}
/*
 * Return a TX ring to its empty state: free any in-flight mbufs, zero
 * the descriptors, and clear this queue's bit in the "queue full" mask.
 * DMA maps and tags are kept for reuse (contrast iwn_free_tx_ring()).
 */
static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
&ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

/*
 * Release all DMA resources attached to a TX ring.
 */
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Reset the ICT (interrupt) table and switch the driver into ICT
 * interrupt mode (5000 series).
 */
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Read the MAC address and adapter-specific configuration from the
 * EEPROM (or OTPROM).  The adapter is powered on for the duration of
 * the ROM access and powered off again before returning.
 */
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct iwn_ops *ops = &sc->ops;
	uint16_t val;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
	/* Read Tx/Rx chains from ROM unless it's known to be broken.
 */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/*
 * Read 4965-specific data from the EEPROM: regulatory domain, channel
 * lists, maximum TX power limits and per-group TX power samples.
 */
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples. */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS - 1; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

#ifdef IWN_DEBUG
/*
 * Dump the TX power samples of one EEPROM band (debug builds only).
 */
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	printf("===band %d===\n", i);
	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	printf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	printf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

/*
 * Read 5000-series-specific data from the EEPROM: regulatory domain,
 * channel lists, enhanced TX power info and calibration data.
 */
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters.
 */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = base + sc->base_params->regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->base_params->enhanced_TX_power)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	/* Chips needing temp-offset v2 calibration carry extra fields. */
	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp_high = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		sc->eeprom_temp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off);
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal));
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return nflags;
}

/*
 * Add the valid 20MHz channels of EEPROM band n to the net80211
 * channel list (band 0 is 2GHz, the others 5GHz).
 */
static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		c = &ic->ic_channels[ic->ic_nchans++];
		c->ic_ieee = chan;
		c->ic_maxregpower = channels[i].maxpwr;
		c->ic_maxpower = 2*c->ic_maxregpower;

		if (n == 0) {	/* 2GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
			/* G =>'s B is supported */
			c->ic_flags =
IEEE80211_CHAN_B | nflags;
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags = IEEE80211_CHAN_G | nflags;
		} else {	/* 5GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
			c->ic_flags = IEEE80211_CHAN_A | nflags;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n", chan,
		    channels[i].flags, channels[i].maxpwr);

		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
			/* add HT20, HT40 added separately */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags |= IEEE80211_CHAN_HT20;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Add the HT40 channel pairs of EEPROM band n (5 = 2GHz, 6 = 5GHz) to
 * the net80211 channel list.  Requires 11n support in the SKU.
 */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c, *cent, *extc;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);

	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
		return;
	}

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		/*
		 * Each entry defines an HT40 channel pair; find the
		 * center channel, then the extension channel above.
		 */
		cent = ieee80211_find_channel_byieee(ic, chan,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (cent == NULL) {	/* XXX shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s: no entry for channel %d\n", __func__, chan);
			continue;
		}
		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (extc == NULL) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: skip chan %d, extension channel not found\n",
			    __func__, chan);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
		    chan, channels[i].flags, channels[i].maxpwr);

		/* Clone the center/extension channels as HT40U/HT40D. */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = cent[0];
		c->ic_extieee = extc->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = extc[0];
		c->ic_extieee = cent->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Read one EEPROM channel band and register its channels with
 * net80211; bands 0-4 are 20MHz bands, later ones are HT40 bands.
 */
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));

	if (n < 5)
		iwn_read_eeprom_band(sc, n);
	else
		iwn_read_eeprom_ht40(sc, n);
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}

/*
 * Map a net80211 channel back to its EEPROM channel entry, or NULL
 * if the channel is unknown to the EEPROM.
 */
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	int band, chan, i, j;

	if (IEEE80211_IS_CHAN_HT40(c)) {
		band = IEEE80211_IS_CHAN_5GHZ(c) ?
6 : 5;
		if (IEEE80211_IS_CHAN_HT40D(c))
			chan = c->ic_extieee;
		else
			chan = c->ic_ieee;
		for (i = 0; i < iwn_bands[band].nchan; i++) {
			if (iwn_bands[band].chan[i] == chan)
				return &sc->eeprom_channels[band][i];
		}
	} else {
		for (j = 0; j < 5; j++) {
			for (i = 0; i < iwn_bands[j].nchan; i++) {
				if (iwn_bands[j].chan[i] == c->ic_ieee)
					return &sc->eeprom_channels[j][i];
			}
		}
	}
	return NULL;
}

/*
 * Enforce flags read from EEPROM.
 */
static int
iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
    int nchan, struct ieee80211_channel chans[])
{
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		struct iwn_eeprom_chan *channel;

		channel = iwn_find_eeprom_channel(sc, c);
		if (channel == NULL) {
			if_printf(ic->ic_ifp,
			    "%s: invalid channel %u freq %u/0x%x\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		c->ic_flags |= iwn_eeprom_channel_flags(channel);
	}

	return 0;
}

/*
 * Apply enhanced (per-channel) TX power limits from the EEPROM to the
 * net80211 channel list (6000 Series).
 */
static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_channel *c;
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i, j;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	for (i = 0; i < nitems(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries. */

		/* Highest limit over the active TX chains/MIMO modes. */
		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);

		for (j = 0; j < ic->ic_nchans; j++) {
			c = &ic->ic_channels[j];
			/* Match the entry against band/modulation/width. */
			if ((flags & IWN_ENHINFO_5GHZ)) {
				if (!IEEE80211_IS_CHAN_A(c))
					continue;
			} else if ((flags & IWN_ENHINFO_OFDM)) {
				if (!IEEE80211_IS_CHAN_G(c))
					continue;
			} else if (!IEEE80211_IS_CHAN_B(c))
				continue;
			if ((flags & IWN_ENHINFO_HT40)) {
				if (!IEEE80211_IS_CHAN_HT40(c))
					continue;
			} else {
				if (IEEE80211_IS_CHAN_HT40(c))
					continue;
			}
			/* Entry chan 0 applies to the whole band. */
			if (enhinfo[i].chan != 0 &&
			    enhinfo[i].chan != c->ic_ieee)
				continue;

			DPRINTF(sc, IWN_DEBUG_RESET,
			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
			    c->ic_flags, maxpwr / 2);
			c->ic_maxregpower = maxpwr / 2;
			c->ic_maxpower = maxpwr;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/* net80211 node allocation callback. */
static struct ieee80211_node *
iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO);
}

/* Map a legacy (CCK/OFDM) rate in 500kbps units to its PLCP code. */
static __inline int
rate2plcp(int rate)
{
	switch (rate & 0xff) {
	case 12:	return 0xd;
	case 18:	return 0xf;
	case 24:	return 0x5;
	case 36:	return 0x7;
	case 48:	return 0x9;
	case 72:	return 0xb;
	case 96:	return 0x1;
	case 108:	return 0x3;
	case 2:		return 10;
	case 4:		return 20;
	case 11:	return 55;
	case 22:	return 110;
	}
	return 0;
}

/*
 * Calculate the required PLCP value from the given rate,
 * to the given node.
 *
 * This will take the node configuration (eg 11n, rate table
 * setup, etc) into consideration.
 */
static uint32_t
iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct ieee80211com *ic = ni->ni_ic;
	uint8_t txant1, txant2;
	uint32_t plcp = 0;
	int ridx;

	/* Use the first valid TX antenna. */
	txant1 = IWN_LSB(sc->txchainmask);
	txant2 = IWN_LSB(sc->txchainmask & ~txant1);

	/*
	 * If it's an MCS rate, let's set the plcp correctly
	 * and set the relevant flags based on the node config.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		/*
		 * Set the initial PLCP value to be between 0->31 for
		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
		 * flag.
		 */
		plcp = RV(rate) | IWN_RFLAG_MCS;

		/*
		 * XXX the following should only occur if both
		 * the local configuration _and_ the remote node
		 * advertise these capabilities.  Thus this code
		 * may need fixing!
		 */

		/*
		 * Set the channel width and guard interval.
		 */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			plcp |= IWN_RFLAG_HT40;
			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
				plcp |= IWN_RFLAG_SGI;
		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			plcp |= IWN_RFLAG_SGI;
		}

		/*
		 * If it's a two stream rate, enable TX on both
		 * antennas.
		 *
		 * XXX three stream rates?
		 */
		if (rate > 0x87)
			plcp |= IWN_RFLAG_ANT(txant1 | txant2);
		else
			plcp |= IWN_RFLAG_ANT(txant1);
	} else {
		/*
		 * Set the initial PLCP - fine for both
		 * OFDM and CCK rates.
		 */
		plcp = rate2plcp(rate);

		/* Set CCK flag if it's CCK */

		/* XXX It would be nice to have a method
		 * to map the ridx -> phy table entry
		 * so we could just query that, rather than
		 * this hack to check against IWN_RIDX_OFDM6.
		 */
		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
		    rate & IEEE80211_RATE_VAL);
		if (ridx < IWN_RIDX_OFDM6 &&
		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
			plcp |= IWN_RFLAG_CCK;

		/* Set antenna configuration */
		plcp |= IWN_RFLAG_ANT(txant1);
	}

	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
	    __func__,
	    rate,
	    plcp);

	/* Firmware expects the PLCP word in little-endian order. */
	return (htole32(plcp));
#undef RV
}

/* net80211 new-association callback. */
static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
	/* Doesn't do anything at the moment */
}

/* net80211 media change callback. */
static int
iwn_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ?
0 : error);
}

/*
 * net80211 state-change callback: drives the firmware through AUTH
 * and RUN transitions for the BSS context, then chains to the vap's
 * saved newstate handler.
 */
static int
iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwn_vap *ivp = IWN_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);

	/* Swap the net80211 lock for the driver lock. */
	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
	callout_stop(&sc->calib_to);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	switch (nstate) {
	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		if (vap->iv_state == IEEE80211_S_AUTH)
			break;

		/*
		 * !AUTH -> AUTH transition requires state reset to handle
		 * reassociations correctly.
		 */
		sc->rxon->associd = 0;
		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
		sc->calib.state = IWN_CALIB_STATE_INIT;

		if ((error = iwn_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state\n", __func__);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition; Just restart the timers.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			sc->calib_cnt = 0;
			break;
		}

		/*
		 * !RUN -> RUN requires setting the association id
		 * which is done with a firmware cmd.  We also defer
		 * starting the timers until that work is done.
		 */
		if ((error = iwn_run(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to run state\n", __func__);
		}
		break;

	case IEEE80211_S_INIT:
		sc->calib.state = IWN_CALIB_STATE_INIT;
		break;

	default:
		break;
	}
	IWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);
	if (error != 0) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);

	return ivp->iv_newstate(vap, nstate, arg);
}

/*
 * Periodic calibration callout, rescheduled every 500ms; requests
 * firmware statistics roughly every 60 seconds (120 ticks).
 */
static void
iwn_calib_timeout(void *arg)
{
	struct iwn_softc *sc = arg;

	IWN_LOCK_ASSERT(sc);

	/* Force automatic TX power calibration every 60 secs. */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
		    "sending request for statistics");
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);
}

/*
 * Process an RX_PHY firmware notification.  This is usually immediately
 * followed by an MPDU_RX_DONE notification.
 */
static void
iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
	sc->last_rx_valid = 1;
}

/*
 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
 */
static void
iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_rx_ring *ring = &sc->rxq;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct mbuf *m, *m1;
	struct iwn_rx_stat *stat;
	caddr_t head;
	bus_addr_t paddr;
	uint32_t flags;
	int error, len, rssi, nf;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (desc->type == IWN_MPDU_RX_DONE) {
		/* Check for prior RX_PHY notification. */
		if (!sc->last_rx_valid) {
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: missing RX_PHY\n", __func__);
			return;
		}
		stat = &sc->last_rx_stat;
	} else
		stat = (struct iwn_rx_stat *)(desc + 1);

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
		device_printf(sc->sc_dev,
		    "%s: invalid RX statistic header, len %d\n", __func__,
		    stat->cfg_phy_len);
		return;
	}
	/* Locate the frame payload and its length in the notification. */
	if (desc->type == IWN_MPDU_RX_DONE) {
		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
		head = (caddr_t)(mpdu + 1);
		len = le16toh(mpdu->len);
	} else {
		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
		len = le16toh(stat->len);
	}

	/* RX status flags follow the frame payload. */
	flags = le32toh(*(uint32_t *)(head + len));

	/* Discard frames with a bad FCS early. */
	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
		    __func__, flags);
		ifp->if_ierrors++;
		return;
	}
	/* Discard frames that are too short. */
	if (len < sizeof (*wh)) {
		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
		    __func__, len);
		ifp->if_ierrors++;
		return;
	}

	/* Get a fresh cluster to restock the ring before handing m up. */
	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
	if (m1 == NULL) {
		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
		    __func__);
		ifp->if_ierrors++;
		return;
	}
	bus_dmamap_unload(ring->data_dmat, data->map);

	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		device_printf(sc->sc_dev,
		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
		m_freem(m1);

		/* Try to reload the old mbuf. */
		error = bus_dmamap_load(ring->data_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
		    &paddr, BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG) {
			panic("%s: could not load old RX mbuf", __func__);
		}
		/* Physical address may have changed. */
		ring->desc[ring->cur] = htole32(paddr >> 8);
		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
		    BUS_DMASYNC_PREWRITE);
		ifp->if_ierrors++;
		return;
	}

	/* Swap the received mbuf for the fresh one. */
	m = data->m;
	data->m = m1;
	/* Update RX descriptor. */
	ring->desc[ring->cur] = htole32(paddr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Finalize mbuf. */
	m->m_pkthdr.rcvif = ifp;
	m->m_data = head;
	m->m_pkthdr.len = m->m_len = len;

	/* Grab a reference to the source node. */
	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	/* Use the calibrated noise floor only when associated and idle. */
	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;

	rssi = ops->get_rssi(sc, stat);

	if (ieee80211_radiotap_active(ic)) {
		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)nf;
		tap->wr_tsft = stat->tstamp;
		switch (stat->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}
	}

	/* Drop the driver lock across the net80211 input path. */
	IWN_UNLOCK(sc);

	/* Send the frame to the 802.11 layer. */
	if (ni != NULL) {
		if (ni->ni_flags & IEEE80211_NODE_HT)
			m->m_flags |= M_AMPDU;
		(void)ieee80211_input(ni, m, rssi - nf, nf);
		/* Node is no longer needed. */
		ieee80211_free_node(ni);
	} else
		(void)ieee80211_input_all(ic, m, rssi - nf, nf);

	IWN_LOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);

}

/* Process an incoming Compressed BlockAck.
 */
static void
iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
	struct iwn_tx_ring *txq;
	struct iwn_tx_data *txdata;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *m;
	uint64_t bitmap;
	uint16_t ssn;
	uint8_t tid;
	int ackfailcnt = 0, i, lastidx, qid, *res, shift;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	qid = le16toh(ba->qid);
	txq = &sc->txq[ba->qid];
	tap = sc->qid2tap[ba->qid];
	tid = tap->txa_tid;
	wn = (void *)tap->txa_ni;

	/* If the session is being torn down, remember where to stop it. */
	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	/* Complete all frames up to the BA start sequence number. */
	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
		txdata = &txq->data[txq->read];

		/* Unmap and free mbuf. */
		bus_dmamap_sync(txq->data_dmat, txdata->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->data_dmat, txdata->map);
		m = txdata->m, txdata->m = NULL;
		ni = txdata->ni, txdata->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));

		ieee80211_tx_complete(ni, m, 1);

		txq->queued--;
		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
	}

	if (txq->queued == 0 && res != NULL) {
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		free(res, M_DEVBUF);
		return;
	}

	if (wn->agg[tid].bitmap == 0)
		return;

	/* Align the BA bitmap with our record of the first frame sent. */
	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
	if (shift < 0)
		shift += 0x100;

	if (wn->agg[tid].nframes > (64 - shift))
		return;

	/* Feed per-frame ACK/failure results to the rate control module. */
	ni = tap->txa_ni;
	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
	for (i = 0; bitmap; i++) {
		if ((bitmap & 1) == 0) {
			ifp->if_oerrors++;
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
		} else {
			ifp->if_opackets++;
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
		}
		bitmap >>= 1;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);

}

/*
 * Process a CALIBRATION_RESULT notification sent by the initialization
 * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
 */
static void
iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
	int len, idx = -1;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Runtime firmware should not send such a notification.
*/ 3143 if (sc->sc_flags & IWN_FLAG_CALIB_DONE){ 3144 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n", 3145 __func__); 3146 return; 3147 } 3148 len = (le32toh(desc->len) & 0x3fff) - 4; 3149 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3150 3151 switch (calib->code) { 3152 case IWN5000_PHY_CALIB_DC: 3153 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC) 3154 idx = 0; 3155 break; 3156 case IWN5000_PHY_CALIB_LO: 3157 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO) 3158 idx = 1; 3159 break; 3160 case IWN5000_PHY_CALIB_TX_IQ: 3161 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ) 3162 idx = 2; 3163 break; 3164 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 3165 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC) 3166 idx = 3; 3167 break; 3168 case IWN5000_PHY_CALIB_BASE_BAND: 3169 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND) 3170 idx = 4; 3171 break; 3172 } 3173 if (idx == -1) /* Ignore other results. */ 3174 return; 3175 3176 /* Save calibration result. */ 3177 if (sc->calibcmd[idx].buf != NULL) 3178 free(sc->calibcmd[idx].buf, M_DEVBUF); 3179 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 3180 if (sc->calibcmd[idx].buf == NULL) { 3181 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3182 "not enough memory for calibration result %d\n", 3183 calib->code); 3184 return; 3185 } 3186 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3187 "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len); 3188 sc->calibcmd[idx].len = len; 3189 memcpy(sc->calibcmd[idx].buf, calib, len); 3190 } 3191 3192 static void 3193 iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib, 3194 struct iwn_stats *stats, int len) 3195 { 3196 struct iwn_stats_bt *stats_bt; 3197 struct iwn_stats *lstats; 3198 3199 /* 3200 * First - check whether the length is the bluetooth or normal. 3201 * 3202 * If it's normal - just copy it and bump out. 
3203 * Otherwise we have to convert things. 3204 */ 3205 3206 if (len == sizeof(struct iwn_stats) + 4) { 3207 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats)); 3208 sc->last_stat_valid = 1; 3209 return; 3210 } 3211 3212 /* 3213 * If it's not the bluetooth size - log, then just copy. 3214 */ 3215 if (len != sizeof(struct iwn_stats_bt) + 4) { 3216 DPRINTF(sc, IWN_DEBUG_STATS, 3217 "%s: size of rx statistics (%d) not an expected size!\n", 3218 __func__, 3219 len); 3220 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats)); 3221 sc->last_stat_valid = 1; 3222 return; 3223 } 3224 3225 /* 3226 * Ok. Time to copy. 3227 */ 3228 stats_bt = (struct iwn_stats_bt *) stats; 3229 lstats = &sc->last_stat; 3230 3231 /* flags */ 3232 lstats->flags = stats_bt->flags; 3233 /* rx_bt */ 3234 memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm, 3235 sizeof(struct iwn_rx_phy_stats)); 3236 memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck, 3237 sizeof(struct iwn_rx_phy_stats)); 3238 memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common, 3239 sizeof(struct iwn_rx_general_stats)); 3240 memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht, 3241 sizeof(struct iwn_rx_ht_phy_stats)); 3242 /* tx */ 3243 memcpy(&lstats->tx, &stats_bt->tx, 3244 sizeof(struct iwn_tx_stats)); 3245 /* general */ 3246 memcpy(&lstats->general, &stats_bt->general, 3247 sizeof(struct iwn_general_stats)); 3248 3249 /* XXX TODO: Squirrel away the extra bluetooth stats somewhere */ 3250 sc->last_stat_valid = 1; 3251 } 3252 3253 /* 3254 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 3255 * The latter is sent by the firmware after each received beacon. 
3256 */ 3257 static void 3258 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3259 struct iwn_rx_data *data) 3260 { 3261 struct iwn_ops *ops = &sc->ops; 3262 struct ifnet *ifp = sc->sc_ifp; 3263 struct ieee80211com *ic = ifp->if_l2com; 3264 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3265 struct iwn_calib_state *calib = &sc->calib; 3266 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 3267 struct iwn_stats *lstats; 3268 int temp; 3269 3270 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3271 3272 /* Ignore statistics received during a scan. */ 3273 if (vap->iv_state != IEEE80211_S_RUN || 3274 (ic->ic_flags & IEEE80211_F_SCAN)){ 3275 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n", 3276 __func__); 3277 return; 3278 } 3279 3280 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3281 3282 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS, 3283 "%s: received statistics, cmd %d, len %d\n", 3284 __func__, desc->type, le16toh(desc->len)); 3285 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 3286 3287 /* 3288 * Collect/track general statistics for reporting. 3289 * 3290 * This takes care of ensuring that the bluetooth sized message 3291 * will be correctly converted to the legacy sized message. 3292 */ 3293 iwn_stats_update(sc, calib, stats, le16toh(desc->len)); 3294 3295 /* 3296 * And now, let's take a reference of it to use! 3297 */ 3298 lstats = &sc->last_stat; 3299 3300 /* Test if temperature has changed. */ 3301 if (lstats->general.temp != sc->rawtemp) { 3302 /* Convert "raw" temperature to degC. */ 3303 sc->rawtemp = stats->general.temp; 3304 temp = ops->get_temperature(sc); 3305 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 3306 __func__, temp); 3307 3308 /* Update TX power if need be (4965AGN only). 
*/ 3309 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3310 iwn4965_power_calibration(sc, temp); 3311 } 3312 3313 if (desc->type != IWN_BEACON_STATISTICS) 3314 return; /* Reply to a statistics request. */ 3315 3316 sc->noise = iwn_get_noise(&lstats->rx.general); 3317 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 3318 3319 /* Test that RSSI and noise are present in stats report. */ 3320 if (le32toh(lstats->rx.general.flags) != 1) { 3321 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 3322 "received statistics without RSSI"); 3323 return; 3324 } 3325 3326 if (calib->state == IWN_CALIB_STATE_ASSOC) 3327 iwn_collect_noise(sc, &lstats->rx.general); 3328 else if (calib->state == IWN_CALIB_STATE_RUN) { 3329 iwn_tune_sensitivity(sc, &lstats->rx); 3330 /* 3331 * XXX TODO: Only run the RX recovery if we're associated! 3332 */ 3333 iwn_check_rx_recovery(sc, lstats); 3334 iwn_save_stats_counters(sc, lstats); 3335 } 3336 3337 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3338 } 3339 3340 /* 3341 * Save the relevant statistic counters for the next calibration 3342 * pass. 3343 */ 3344 static void 3345 iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs) 3346 { 3347 struct iwn_calib_state *calib = &sc->calib; 3348 3349 /* Save counters values for next call. */ 3350 calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp); 3351 calib->fa_cck = le32toh(rs->rx.cck.fa); 3352 calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp); 3353 calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp); 3354 calib->fa_ofdm = le32toh(rs->rx.ofdm.fa); 3355 3356 /* Last time we received these tick values */ 3357 sc->last_calib_ticks = ticks; 3358 } 3359 3360 /* 3361 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 3362 * and 5000 adapters have different incompatible TX status formats. 
3363 */ 3364 static void 3365 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3366 struct iwn_rx_data *data) 3367 { 3368 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 3369 struct iwn_tx_ring *ring; 3370 int qid; 3371 3372 qid = desc->qid & 0xf; 3373 ring = &sc->txq[qid]; 3374 3375 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3376 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3377 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3378 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3379 le32toh(stat->status)); 3380 3381 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3382 if (qid >= sc->firstaggqueue) { 3383 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3384 &stat->status); 3385 } else { 3386 iwn_tx_done(sc, desc, stat->ackfailcnt, 3387 le32toh(stat->status) & 0xff); 3388 } 3389 } 3390 3391 static void 3392 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3393 struct iwn_rx_data *data) 3394 { 3395 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 3396 struct iwn_tx_ring *ring; 3397 int qid; 3398 3399 qid = desc->qid & 0xf; 3400 ring = &sc->txq[qid]; 3401 3402 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3403 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3404 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3405 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3406 le32toh(stat->status)); 3407 3408 #ifdef notyet 3409 /* Reset TX scheduler slot. */ 3410 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 3411 #endif 3412 3413 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3414 if (qid >= sc->firstaggqueue) { 3415 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3416 &stat->status); 3417 } else { 3418 iwn_tx_done(sc, desc, stat->ackfailcnt, 3419 le16toh(stat->status) & 0xff); 3420 } 3421 } 3422 3423 /* 3424 * Adapter-independent backend for TX_DONE firmware notifications. 
3425 */ 3426 static void 3427 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 3428 uint8_t status) 3429 { 3430 struct ifnet *ifp = sc->sc_ifp; 3431 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 3432 struct iwn_tx_data *data = &ring->data[desc->idx]; 3433 struct mbuf *m; 3434 struct ieee80211_node *ni; 3435 struct ieee80211vap *vap; 3436 3437 KASSERT(data->ni != NULL, ("no node")); 3438 3439 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3440 3441 /* Unmap and free mbuf. */ 3442 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 3443 bus_dmamap_unload(ring->data_dmat, data->map); 3444 m = data->m, data->m = NULL; 3445 ni = data->ni, data->ni = NULL; 3446 vap = ni->ni_vap; 3447 3448 /* 3449 * Update rate control statistics for the node. 3450 */ 3451 if (status & IWN_TX_FAIL) { 3452 ifp->if_oerrors++; 3453 ieee80211_ratectl_tx_complete(vap, ni, 3454 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3455 } else { 3456 ifp->if_opackets++; 3457 ieee80211_ratectl_tx_complete(vap, ni, 3458 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3459 } 3460 3461 /* 3462 * Channels marked for "radar" require traffic to be received 3463 * to unlock before we can transmit. Until traffic is seen 3464 * any attempt to transmit is returned immediately with status 3465 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 3466 * happen on first authenticate after scanning. To workaround 3467 * this we ignore a failure of this sort in AUTH state so the 3468 * 802.11 layer will fall back to using a timeout to wait for 3469 * the AUTH reply. This allows the firmware time to see 3470 * traffic so a subsequent retry of AUTH succeeds. It's 3471 * unclear why the firmware does not maintain state for 3472 * channels recently visited as this would allow immediate 3473 * use of the channel after a scan (where we see traffic). 
3474 */ 3475 if (status == IWN_TX_FAIL_TX_LOCKED && 3476 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 3477 ieee80211_tx_complete(ni, m, 0); 3478 else 3479 ieee80211_tx_complete(ni, m, 3480 (status & IWN_TX_FAIL) != 0); 3481 3482 sc->sc_tx_timer = 0; 3483 if (--ring->queued < IWN_TX_RING_LOMARK) { 3484 sc->qfullmsk &= ~(1 << ring->qid); 3485 if (sc->qfullmsk == 0 && 3486 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3487 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3488 iwn_start_locked(ifp); 3489 } 3490 } 3491 3492 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3493 3494 } 3495 3496 /* 3497 * Process a "command done" firmware notification. This is where we wakeup 3498 * processes waiting for a synchronous command completion. 3499 */ 3500 static void 3501 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3502 { 3503 struct iwn_tx_ring *ring; 3504 struct iwn_tx_data *data; 3505 int cmd_queue_num; 3506 3507 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 3508 cmd_queue_num = IWN_PAN_CMD_QUEUE; 3509 else 3510 cmd_queue_num = IWN_CMD_QUEUE_NUM; 3511 3512 if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num) 3513 return; /* Not a command ack. */ 3514 3515 ring = &sc->txq[cmd_queue_num]; 3516 data = &ring->data[desc->idx]; 3517 3518 /* If the command was mapped in an mbuf, free it. 
*/ 3519 if (data->m != NULL) { 3520 bus_dmamap_sync(ring->data_dmat, data->map, 3521 BUS_DMASYNC_POSTWRITE); 3522 bus_dmamap_unload(ring->data_dmat, data->map); 3523 m_freem(data->m); 3524 data->m = NULL; 3525 } 3526 wakeup(&ring->desc[desc->idx]); 3527 } 3528 3529 static void 3530 iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes, 3531 void *stat) 3532 { 3533 struct iwn_ops *ops = &sc->ops; 3534 struct ifnet *ifp = sc->sc_ifp; 3535 struct iwn_tx_ring *ring = &sc->txq[qid]; 3536 struct iwn_tx_data *data; 3537 struct mbuf *m; 3538 struct iwn_node *wn; 3539 struct ieee80211_node *ni; 3540 struct ieee80211_tx_ampdu *tap; 3541 uint64_t bitmap; 3542 uint32_t *status = stat; 3543 uint16_t *aggstatus = stat; 3544 uint16_t ssn; 3545 uint8_t tid; 3546 int bit, i, lastidx, *res, seqno, shift, start; 3547 3548 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3549 3550 if (nframes == 1) { 3551 if ((*status & 0xff) != 1 && (*status & 0xff) != 2) { 3552 #ifdef NOT_YET 3553 printf("ieee80211_send_bar()\n"); 3554 #endif 3555 /* 3556 * If we completely fail a transmit, make sure a 3557 * notification is pushed up to the rate control 3558 * layer. 
3559 */ 3560 tap = sc->qid2tap[qid]; 3561 tid = tap->txa_tid; 3562 wn = (void *)tap->txa_ni; 3563 ni = tap->txa_ni; 3564 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3565 IEEE80211_RATECTL_TX_FAILURE, &nframes, NULL); 3566 } 3567 } 3568 3569 bitmap = 0; 3570 start = idx; 3571 for (i = 0; i < nframes; i++) { 3572 if (le16toh(aggstatus[i * 2]) & 0xc) 3573 continue; 3574 3575 idx = le16toh(aggstatus[2*i + 1]) & 0xff; 3576 bit = idx - start; 3577 shift = 0; 3578 if (bit >= 64) { 3579 shift = 0x100 - idx + start; 3580 bit = 0; 3581 start = idx; 3582 } else if (bit <= -64) 3583 bit = 0x100 - start + idx; 3584 else if (bit < 0) { 3585 shift = start - idx; 3586 start = idx; 3587 bit = 0; 3588 } 3589 bitmap = bitmap << shift; 3590 bitmap |= 1ULL << bit; 3591 } 3592 tap = sc->qid2tap[qid]; 3593 tid = tap->txa_tid; 3594 wn = (void *)tap->txa_ni; 3595 wn->agg[tid].bitmap = bitmap; 3596 wn->agg[tid].startidx = start; 3597 wn->agg[tid].nframes = nframes; 3598 3599 res = NULL; 3600 ssn = 0; 3601 if (!IEEE80211_AMPDU_RUNNING(tap)) { 3602 res = tap->txa_private; 3603 ssn = tap->txa_start & 0xfff; 3604 } 3605 3606 seqno = le32toh(*(status + nframes)) & 0xfff; 3607 for (lastidx = (seqno & 0xff); ring->read != lastidx;) { 3608 data = &ring->data[ring->read]; 3609 3610 /* Unmap and free mbuf. 
*/ 3611 bus_dmamap_sync(ring->data_dmat, data->map, 3612 BUS_DMASYNC_POSTWRITE); 3613 bus_dmamap_unload(ring->data_dmat, data->map); 3614 m = data->m, data->m = NULL; 3615 ni = data->ni, data->ni = NULL; 3616 3617 KASSERT(ni != NULL, ("no node")); 3618 KASSERT(m != NULL, ("no mbuf")); 3619 3620 ieee80211_tx_complete(ni, m, 1); 3621 3622 ring->queued--; 3623 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT; 3624 } 3625 3626 if (ring->queued == 0 && res != NULL) { 3627 iwn_nic_lock(sc); 3628 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3629 iwn_nic_unlock(sc); 3630 sc->qid2tap[qid] = NULL; 3631 free(res, M_DEVBUF); 3632 return; 3633 } 3634 3635 sc->sc_tx_timer = 0; 3636 if (ring->queued < IWN_TX_RING_LOMARK) { 3637 sc->qfullmsk &= ~(1 << ring->qid); 3638 if (sc->qfullmsk == 0 && 3639 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3640 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3641 iwn_start_locked(ifp); 3642 } 3643 } 3644 3645 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3646 3647 } 3648 3649 /* 3650 * Process an INT_FH_RX or INT_SW_RX interrupt. 
3651 */ 3652 static void 3653 iwn_notif_intr(struct iwn_softc *sc) 3654 { 3655 struct iwn_ops *ops = &sc->ops; 3656 struct ifnet *ifp = sc->sc_ifp; 3657 struct ieee80211com *ic = ifp->if_l2com; 3658 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3659 uint16_t hw; 3660 3661 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 3662 BUS_DMASYNC_POSTREAD); 3663 3664 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 3665 while (sc->rxq.cur != hw) { 3666 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 3667 struct iwn_rx_desc *desc; 3668 3669 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3670 BUS_DMASYNC_POSTREAD); 3671 desc = mtod(data->m, struct iwn_rx_desc *); 3672 3673 DPRINTF(sc, IWN_DEBUG_RECV, 3674 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 3675 __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags, 3676 desc->type, iwn_intr_str(desc->type), 3677 le16toh(desc->len)); 3678 3679 if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */ 3680 iwn_cmd_done(sc, desc); 3681 3682 switch (desc->type) { 3683 case IWN_RX_PHY: 3684 iwn_rx_phy(sc, desc, data); 3685 break; 3686 3687 case IWN_RX_DONE: /* 4965AGN only. */ 3688 case IWN_MPDU_RX_DONE: 3689 /* An 802.11 frame has been received. */ 3690 iwn_rx_done(sc, desc, data); 3691 break; 3692 3693 case IWN_RX_COMPRESSED_BA: 3694 /* A Compressed BlockAck has been received. */ 3695 iwn_rx_compressed_ba(sc, desc, data); 3696 break; 3697 3698 case IWN_TX_DONE: 3699 /* An 802.11 frame has been transmitted. 
*/ 3700 ops->tx_done(sc, desc, data); 3701 break; 3702 3703 case IWN_RX_STATISTICS: 3704 case IWN_BEACON_STATISTICS: 3705 iwn_rx_statistics(sc, desc, data); 3706 break; 3707 3708 case IWN_BEACON_MISSED: 3709 { 3710 struct iwn_beacon_missed *miss = 3711 (struct iwn_beacon_missed *)(desc + 1); 3712 int misses; 3713 3714 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3715 BUS_DMASYNC_POSTREAD); 3716 misses = le32toh(miss->consecutive); 3717 3718 DPRINTF(sc, IWN_DEBUG_STATE, 3719 "%s: beacons missed %d/%d\n", __func__, 3720 misses, le32toh(miss->total)); 3721 /* 3722 * If more than 5 consecutive beacons are missed, 3723 * reinitialize the sensitivity state machine. 3724 */ 3725 if (vap->iv_state == IEEE80211_S_RUN && 3726 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 3727 if (misses > 5) 3728 (void)iwn_init_sensitivity(sc); 3729 if (misses >= vap->iv_bmissthreshold) { 3730 IWN_UNLOCK(sc); 3731 ieee80211_beacon_miss(ic); 3732 IWN_LOCK(sc); 3733 } 3734 } 3735 break; 3736 } 3737 case IWN_UC_READY: 3738 { 3739 struct iwn_ucode_info *uc = 3740 (struct iwn_ucode_info *)(desc + 1); 3741 3742 /* The microcontroller is ready. */ 3743 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3744 BUS_DMASYNC_POSTREAD); 3745 DPRINTF(sc, IWN_DEBUG_RESET, 3746 "microcode alive notification version=%d.%d " 3747 "subtype=%x alive=%x\n", uc->major, uc->minor, 3748 uc->subtype, le32toh(uc->valid)); 3749 3750 if (le32toh(uc->valid) != 1) { 3751 device_printf(sc->sc_dev, 3752 "microcontroller initialization failed"); 3753 break; 3754 } 3755 if (uc->subtype == IWN_UCODE_INIT) { 3756 /* Save microcontroller report. */ 3757 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 3758 } 3759 /* Save the address of the error log in SRAM. */ 3760 sc->errptr = le32toh(uc->errptr); 3761 break; 3762 } 3763 case IWN_STATE_CHANGED: 3764 { 3765 /* 3766 * State change allows hardware switch change to be 3767 * noted. However, we handle this in iwn_intr as we 3768 * get both the enable/disble intr. 
3769 */ 3770 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3771 BUS_DMASYNC_POSTREAD); 3772 #ifdef IWN_DEBUG 3773 uint32_t *status = (uint32_t *)(desc + 1); 3774 DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE, 3775 "state changed to %x\n", 3776 le32toh(*status)); 3777 #endif 3778 break; 3779 } 3780 case IWN_START_SCAN: 3781 { 3782 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3783 BUS_DMASYNC_POSTREAD); 3784 #ifdef IWN_DEBUG 3785 struct iwn_start_scan *scan = 3786 (struct iwn_start_scan *)(desc + 1); 3787 DPRINTF(sc, IWN_DEBUG_ANY, 3788 "%s: scanning channel %d status %x\n", 3789 __func__, scan->chan, le32toh(scan->status)); 3790 #endif 3791 break; 3792 } 3793 case IWN_STOP_SCAN: 3794 { 3795 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3796 BUS_DMASYNC_POSTREAD); 3797 #ifdef IWN_DEBUG 3798 struct iwn_stop_scan *scan = 3799 (struct iwn_stop_scan *)(desc + 1); 3800 DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN, 3801 "scan finished nchan=%d status=%d chan=%d\n", 3802 scan->nchan, scan->status, scan->chan); 3803 #endif 3804 sc->sc_is_scanning = 0; 3805 IWN_UNLOCK(sc); 3806 ieee80211_scan_next(vap); 3807 IWN_LOCK(sc); 3808 break; 3809 } 3810 case IWN5000_CALIBRATION_RESULT: 3811 iwn5000_rx_calib_results(sc, desc, data); 3812 break; 3813 3814 case IWN5000_CALIBRATION_DONE: 3815 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 3816 wakeup(sc); 3817 break; 3818 } 3819 3820 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 3821 } 3822 3823 /* Tell the firmware what we have processed. */ 3824 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 3825 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 3826 } 3827 3828 /* 3829 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 3830 * from power-down sleep mode. 3831 */ 3832 static void 3833 iwn_wakeup_intr(struct iwn_softc *sc) 3834 { 3835 int qid; 3836 3837 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 3838 __func__); 3839 3840 /* Wakeup RX and TX rings. 
*/ 3841 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 3842 for (qid = 0; qid < sc->ntxqs; qid++) { 3843 struct iwn_tx_ring *ring = &sc->txq[qid]; 3844 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 3845 } 3846 } 3847 3848 static void 3849 iwn_rftoggle_intr(struct iwn_softc *sc) 3850 { 3851 struct ifnet *ifp = sc->sc_ifp; 3852 struct ieee80211com *ic = ifp->if_l2com; 3853 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 3854 3855 IWN_LOCK_ASSERT(sc); 3856 3857 device_printf(sc->sc_dev, "RF switch: radio %s\n", 3858 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 3859 if (tmp & IWN_GP_CNTRL_RFKILL) 3860 ieee80211_runtask(ic, &sc->sc_radioon_task); 3861 else 3862 ieee80211_runtask(ic, &sc->sc_radiooff_task); 3863 } 3864 3865 /* 3866 * Dump the error log of the firmware when a firmware panic occurs. Although 3867 * we can't debug the firmware because it is neither open source nor free, it 3868 * can help us to identify certain classes of problems. 3869 */ 3870 static void 3871 iwn_fatal_intr(struct iwn_softc *sc) 3872 { 3873 struct iwn_fw_dump dump; 3874 int i; 3875 3876 IWN_LOCK_ASSERT(sc); 3877 3878 /* Force a complete recalibration on next init. */ 3879 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 3880 3881 /* Check that the error log address is valid. */ 3882 if (sc->errptr < IWN_FW_DATA_BASE || 3883 sc->errptr + sizeof (dump) > 3884 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 3885 printf("%s: bad firmware error log address 0x%08x\n", __func__, 3886 sc->errptr); 3887 return; 3888 } 3889 if (iwn_nic_lock(sc) != 0) { 3890 printf("%s: could not read firmware error log\n", __func__); 3891 return; 3892 } 3893 /* Read firmware error log from SRAM. 
*/ 3894 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 3895 sizeof (dump) / sizeof (uint32_t)); 3896 iwn_nic_unlock(sc); 3897 3898 if (dump.valid == 0) { 3899 printf("%s: firmware error log is empty\n", __func__); 3900 return; 3901 } 3902 printf("firmware error log:\n"); 3903 printf(" error type = \"%s\" (0x%08X)\n", 3904 (dump.id < nitems(iwn_fw_errmsg)) ? 3905 iwn_fw_errmsg[dump.id] : "UNKNOWN", 3906 dump.id); 3907 printf(" program counter = 0x%08X\n", dump.pc); 3908 printf(" source line = 0x%08X\n", dump.src_line); 3909 printf(" error data = 0x%08X%08X\n", 3910 dump.error_data[0], dump.error_data[1]); 3911 printf(" branch link = 0x%08X%08X\n", 3912 dump.branch_link[0], dump.branch_link[1]); 3913 printf(" interrupt link = 0x%08X%08X\n", 3914 dump.interrupt_link[0], dump.interrupt_link[1]); 3915 printf(" time = %u\n", dump.time[0]); 3916 3917 /* Dump driver status (TX and RX rings) while we're here. */ 3918 printf("driver status:\n"); 3919 for (i = 0; i < sc->ntxqs; i++) { 3920 struct iwn_tx_ring *ring = &sc->txq[i]; 3921 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 3922 i, ring->qid, ring->cur, ring->queued); 3923 } 3924 printf(" rx ring: cur=%d\n", sc->rxq.cur); 3925 } 3926 3927 static void 3928 iwn_intr(void *arg) 3929 { 3930 struct iwn_softc *sc = arg; 3931 struct ifnet *ifp = sc->sc_ifp; 3932 uint32_t r1, r2, tmp; 3933 3934 IWN_LOCK(sc); 3935 3936 /* Disable interrupts. */ 3937 IWN_WRITE(sc, IWN_INT_MASK, 0); 3938 3939 /* Read interrupts from ICT (fast) or from registers (slow). */ 3940 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3941 tmp = 0; 3942 while (sc->ict[sc->ict_cur] != 0) { 3943 tmp |= sc->ict[sc->ict_cur]; 3944 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 3945 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 3946 } 3947 tmp = le32toh(tmp); 3948 if (tmp == 0xffffffff) /* Shouldn't happen. */ 3949 tmp = 0; 3950 else if (tmp & 0xc0000) /* Workaround a HW bug. 
*/ 3951 tmp |= 0x8000; 3952 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3953 r2 = 0; /* Unused. */ 3954 } else { 3955 r1 = IWN_READ(sc, IWN_INT); 3956 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3957 return; /* Hardware gone! */ 3958 r2 = IWN_READ(sc, IWN_FH_INT); 3959 } 3960 3961 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" 3962 , r1, r2); 3963 3964 if (r1 == 0 && r2 == 0) 3965 goto done; /* Interrupt not for us. */ 3966 3967 /* Acknowledge interrupts. */ 3968 IWN_WRITE(sc, IWN_INT, r1); 3969 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3970 IWN_WRITE(sc, IWN_FH_INT, r2); 3971 3972 if (r1 & IWN_INT_RF_TOGGLED) { 3973 iwn_rftoggle_intr(sc); 3974 goto done; 3975 } 3976 if (r1 & IWN_INT_CT_REACHED) { 3977 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 3978 __func__); 3979 } 3980 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3981 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 3982 __func__); 3983 #ifdef IWN_DEBUG 3984 iwn_debug_register(sc); 3985 #endif 3986 /* Dump firmware error log and stop. */ 3987 iwn_fatal_intr(sc); 3988 3989 taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task); 3990 goto done; 3991 } 3992 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3993 (r2 & IWN_FH_INT_RX)) { 3994 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3995 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3996 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3997 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3998 IWN_INT_PERIODIC_DIS); 3999 iwn_notif_intr(sc); 4000 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 4001 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 4002 IWN_INT_PERIODIC_ENA); 4003 } 4004 } else 4005 iwn_notif_intr(sc); 4006 } 4007 4008 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 4009 if (sc->sc_flags & IWN_FLAG_USE_ICT) 4010 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 4011 wakeup(sc); /* FH DMA transfer completed. */ 4012 } 4013 4014 if (r1 & IWN_INT_ALIVE) 4015 wakeup(sc); /* Firmware is alive. 
*/ 4016 4017 if (r1 & IWN_INT_WAKEUP) 4018 iwn_wakeup_intr(sc); 4019 4020 done: 4021 /* Re-enable interrupts. */ 4022 if (ifp->if_flags & IFF_UP) 4023 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 4024 4025 IWN_UNLOCK(sc); 4026 } 4027 4028 /* 4029 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 4030 * 5000 adapters use a slightly different format). 4031 */ 4032 static void 4033 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 4034 uint16_t len) 4035 { 4036 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 4037 4038 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4039 4040 *w = htole16(len + 8); 4041 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4042 BUS_DMASYNC_PREWRITE); 4043 if (idx < IWN_SCHED_WINSZ) { 4044 *(w + IWN_TX_RING_COUNT) = *w; 4045 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4046 BUS_DMASYNC_PREWRITE); 4047 } 4048 } 4049 4050 static void 4051 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 4052 uint16_t len) 4053 { 4054 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 4055 4056 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4057 4058 *w = htole16(id << 12 | (len + 8)); 4059 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4060 BUS_DMASYNC_PREWRITE); 4061 if (idx < IWN_SCHED_WINSZ) { 4062 *(w + IWN_TX_RING_COUNT) = *w; 4063 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4064 BUS_DMASYNC_PREWRITE); 4065 } 4066 } 4067 4068 #ifdef notyet 4069 static void 4070 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 4071 { 4072 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 4073 4074 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4075 4076 *w = (*w & htole16(0xf000)) | htole16(1); 4077 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4078 BUS_DMASYNC_PREWRITE); 4079 if (idx < IWN_SCHED_WINSZ) { 4080 *(w + IWN_TX_RING_COUNT) = *w; 4081 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4082 
	    BUS_DMASYNC_PREWRITE);
	}
}
#endif

/*
 * Check whether OFDM 11g protection will be enabled for the given rate.
 *
 * The original driver code only enabled protection for OFDM rates.
 * It didn't check to see whether it was operating in 11a or 11bg mode.
 *
 * Returns 1 if protection (RTS/CTS or CTS-to-self) should be used for
 * 'rate', 0 otherwise.
 */
static int
iwn_check_rate_needs_protection(struct iwn_softc *sc,
    struct ieee80211vap *vap, uint8_t rate)
{
	struct ieee80211com *ic = vap->iv_ic;

	/*
	 * Not in 2GHz mode? Then there's no need to enable OFDM
	 * 11bg protection.
	 */
	if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
		return (0);
	}

	/*
	 * 11bg protection not enabled? Then don't use it.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
		return (0);

	/*
	 * If it's an 11n rate, then for now we enable
	 * protection.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		return (1);
	}

	/*
	 * Do a rate table lookup.  If the PHY is CCK,
	 * don't do protection.
	 */
	if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
		return (0);

	/*
	 * Yup, enable protection.
	 */
	return (1);
}

/*
 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
 * the link quality table that reflects this particular entry.
 *
 * 'rate' carries the IEEE80211_RATE_MCS bit for 11n rates; the same
 * bit is OR'd onto each table entry before comparing so legacy and
 * MCS rates are matched consistently.
 */
static int
iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
	struct ieee80211_rateset *rs;
	int is_11n;
	int nr;
	int i;
	uint8_t cmp_rate;

	/*
	 * Figure out if we're using 11n or not here.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
		is_11n = 1;
	else
		is_11n = 0;

	/*
	 * Use the correct rate table.
	 */
	if (is_11n) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		nr = ni->ni_htrates.rs_nrates;
	} else {
		rs = &ni->ni_rates;
		nr = rs->rs_nrates;
	}

	/*
	 * Find the relevant link quality entry in the table.
	 */
	for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
		/*
		 * The link quality table index starts at 0 == highest
		 * rate, so we walk the rate table backwards.
		 */
		cmp_rate = rs->rs_rates[(nr - 1) - i];
		if (rate & IEEE80211_RATE_MCS)
			cmp_rate |= IEEE80211_RATE_MCS;

#if 0
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
		    __func__,
		    i,
		    nr,
		    rate,
		    cmp_rate);
#endif

		if (cmp_rate == rate)
			return (i);
	}

	/* Failed? Start at the end */
	return (IWN_MAX_TX_RETRIES - 1);
}

/*
 * Transmit a frame on the normal (net80211-driven) path: pick the EDCA
 * access category and TX ring, choose a rate, optionally encrypt, build
 * the firmware TX command, DMA-map the payload and kick the ring.
 *
 * The mbuf is consumed on error (m_freem); on success it is stashed in
 * the ring slot (data->m/data->ni) until TX completion.  Caller must
 * hold the softc lock.
 */
static int
iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct iwn_ops *ops = &sc->ops;
	const struct ieee80211_txparam *tp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_node *wn = (void *)ni;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	uint32_t flags;
	uint16_t qos;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint8_t tid, type;
	int ac, i, totlen, error, pad, nsegs = 0, rate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Select EDCA Access Category and TX ring for this frame. */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}
	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		uint16_t seqno;
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap)) {
			m_freem(m);
			return EINVAL;
		}

		/*
		 * Queue this frame to the hardware ring that we've
		 * negotiated AMPDU TX on.
		 *
		 * Note that the sequence number must match the TX slot
		 * being used!
		 */
		ac = *(int *)tap->txa_private;
		seqno = ni->ni_txseqs[tid];
		*(uint16_t *)wh->i_seq =
		    htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		ring = &sc->txq[ac];
		/* Diagnostic only: warn if seqno and ring slot diverge. */
		if ((seqno % 256) != ring->cur) {
			device_printf(sc->sc_dev,
			    "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
			    __func__,
			    m,
			    seqno,
			    seqno % 256,
			    ring->cur);
		}
		ni->ni_txseqs[tid]++;
	}
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate index. */
	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
	if (type == IEEE80211_FC0_TYPE_MGT)
		rate = tp->mgmtrate;
	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
		rate = tp->mcastrate;
	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rate = tp->ucastrate;
	else if (m->m_flags & M_EAPOL)
		rate = tp->mgmtrate;	/* EAPOL sent at a robust rate */
	else {
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		rate = ni->ni_txrate;
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		ieee80211_radiotap_tx(vap, m);
	}

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* Unicast frame, check if an ACK is expected. */
		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
		    IEEE80211_QOS_ACKPOLICY_NOACK)
			flags |= IWN_TX_NEED_ACK;
	}
	if ((wh->i_fc[0] &
	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */

	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */

	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* NB: Group frames are sent using CCK in 802.11b/g. */
		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
			flags |= IWN_TX_NEED_RTS;
		} else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				flags |= IWN_TX_NEED_CTS;
			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
				flags |= IWN_TX_NEED_RTS;
		}

		/* XXX HT protection? */

		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
				/* 5000 autoselects RTS/CTS or CTS-to-self. */
				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
				flags |= IWN_TX_NEED_PROTECTION;
			} else
				flags |= IWN_TX_FULL_TXOP;
		}
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->id = sc->broadcast_id;
	else
		tx->id = wn->id;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->len = htole16(totlen);
	tx->tid = tid;
	tx->rts_ntries = 60;
	tx->data_ntries = 15;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
	if (tx->id == sc->broadcast_id) {
		/* Group or management frame. */
		tx->linkq = 0;
	} else {
		tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
		flags |= IWN_TX_LINKQ;	/* enable MRR */
	}

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: qid %d idx %d len %d nsegs %d rate %04x plcp 0x%08x\n",
	    __func__,
	    ring->qid,
	    ring->cur,
	    m->m_pkthdr.len,
	    nsegs,
	    rate,
	    tx->rate);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	/* Sync command, payload and descriptor before the doorbell write. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Transmit a frame with explicit parameters supplied by the caller
 * (raw/BPF injection path).  Rate, retry counts, priority and
 * RTS/CTS/ACK behaviour all come from 'params' instead of net80211
 * state.  The frame is always sent via the broadcast station entry
 * with no link quality (MRR) table.  Caller must hold the softc lock;
 * the mbuf is consumed on error.
 */
static int
iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211vap *vap = ni->ni_vap;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct mbuf *m1;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint32_t flags;
	u_int hdrlen;
	int ac, totlen, error, pad, nsegs = 0, i, rate;
	uint8_t type;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	ac = params->ibp_pri & 3;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate. */
	rate = params->ibp_rate0;
	totlen = m->m_pkthdr.len;

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
		flags |= IWN_TX_NEED_ACK;
	if (params->ibp_flags & IEEE80211_BPF_RTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_RTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
	}
	if (params->ibp_flags & IEEE80211_BPF_CTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_CTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
	}
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;

		ieee80211_radiotap_tx(vap, m);
	}

	tx->len = htole16(totlen);
	tx->tid = 0;
	tx->id = sc->broadcast_id;
	tx->rts_ntries = params->ibp_try1;
	tx->data_ntries = params->ibp_try0;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);

	/* Group or management frame. */
	tx->linkq = 0;

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	/* Sync command, payload and descriptor before the doorbell write. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * net80211 raw_xmit entry point: dispatch to the normal or the
 * parameterized TX path depending on whether 'params' was supplied.
 * On TX failure the node reference is dropped here (the mbuf was
 * already reclaimed by the TX path).
 */
static int
iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}

	IWN_LOCK(sc);
	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		error = iwn_tx_data(sc, m, ni);
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		error = iwn_tx_data_raw(sc, m, ni, params);
	}
	if (error != 0) {
		/* NB: m is reclaimed on tx failure */
		ieee80211_free_node(ni);
		ifp->if_oerrors++;
	}
	sc->sc_tx_timer = 5;	/* (re)arm the watchdog timer */

	IWN_UNLOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * ifnet if_start handler: take the lock and drain the send queue.
 */
static void
iwn_start(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;

	IWN_LOCK(sc);
	iwn_start_locked(ifp);
	IWN_UNLOCK(sc);
}

/*
 * Drain the interface send queue, handing each packet to iwn_tx_data().
 * Stops (and sets OACTIVE) as soon as any TX ring is over its high-water
 * mark.  Caller must hold the softc lock.
 */
static void
iwn_start_locked(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *m;

	IWN_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
		return;

	for (;;) {
		if (sc->qfullmsk != 0) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/* NB: the node reference rides in the mbuf pkthdr. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwn_tx_data(sc, m, ni) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}
		sc->sc_tx_timer = 5;
	}
}

/*
 * Per-second watchdog callout: count down sc_tx_timer and trigger a
 * device reinit task if it expires, otherwise reschedule ourselves.
 */
static void
iwn_watchdog(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	IWN_LOCK_ASSERT(sc);

	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			ieee80211_runtask(ic, &sc->sc_reinit_task);
			return;	/* reinit will restart the callout */
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
}

/*
 * ifnet ioctl handler.  Handles interface up/down transitions and the
 * driver-private statistics ioctls (SIOCGIWNSTATS/SIOCZIWNSTATS).
 */
static int
iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0, startall = 0, stop = 0;

	switch (cmd) {
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		IWN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				iwn_init_locked(sc);
				/* RF kill switch decides up vs. forced stop. */
				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
					startall = 1;
				else
					stop = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				iwn_stop_locked(sc);
		}
		IWN_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		else if (vap != NULL && stop)
			ieee80211_stop(vap);
		break;
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGIWNSTATS:
		IWN_LOCK(sc);
		/* XXX validate permissions/memory/etc? */
		error = copyout(&sc->last_stat, ifr->ifr_data,
		    sizeof(struct iwn_stats));
		IWN_UNLOCK(sc);
		break;
	case SIOCZIWNSTATS:
		IWN_LOCK(sc);
		memset(&sc->last_stat, 0, sizeof(struct iwn_stats));
		IWN_UNLOCK(sc);
		error = 0;
		break;
	default:
		/* NOTE(review): unknown ioctls conventionally return
		 * ENOTTY rather than EINVAL — confirm before changing. */
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Send a command to the firmware.
 *
 * Commands larger than the inline command buffer are staged in a
 * freshly allocated mbuf cluster and DMA-mapped.  When 'async' is 0
 * the caller must hold the softc lock and this blocks (msleep on the
 * descriptor, 1s timeout) until the command completes; when non-zero
 * it returns immediately after queueing.
 */
static int
iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
{
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	int totlen, error;
	int cmd_queue_num;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (async == 0)
		IWN_LOCK_ASSERT(sc);

	/* PAN-capable firmware uses a different command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		cmd_queue_num = IWN_PAN_CMD_QUEUE;
	else
		cmd_queue_num = IWN_CMD_QUEUE_NUM;

	ring = &sc->txq[cmd_queue_num];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];
	totlen = 4 + size;	/* 4-byte command header + payload */

	if (size > sizeof cmd->data) {
		/* Command is too large to fit in a descriptor. */
		/* NOTE(review): the bound is checked against MCLBYTES but
		 * the allocation requests MJUMPAGESIZE clusters — confirm
		 * which limit is intended. */
		if (totlen > MCLBYTES)
			return EINVAL;
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (m == NULL)
			return ENOMEM;
		cmd = mtod(m, struct iwn_tx_cmd *);
		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m);
			return error;
		}
		data->m = m;
	} else {
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->code = code;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;
	memcpy(cmd->data, buf, size);

	desc->nsegs = 1;
	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);

	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
	    __func__, iwn_intr_str(cmd->code), cmd->code,
	    cmd->flags, cmd->qid, cmd->idx);

	if (size > sizeof cmd->data) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
}

/*
 * Add a station entry on 4965 hardware, which uses a slightly smaller
 * node structure than the 5000 series.
 */
static int
iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
	struct iwn4965_node_info hnode;
	caddr_t src, dst;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * We use the node structure for 5000 Series internally (it is
	 * a superset of the one for 4965AGN). We thus copy the common
	 * fields before sending the command.
	 */
	src = (caddr_t)node;
	dst = (caddr_t)&hnode;
	memcpy(dst, src, 48);
	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
	memcpy(dst + 48, src + 72, 20);
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
}

/*
 * Add a station entry on 5000-series hardware.
 */
static int
iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Direct mapping. */
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}

/*
 * Build and send the firmware link quality (multi-rate-retry) table
 * for the given station: antenna masks, A-MPDU parameters and a retry
 * ladder walking down from the node's highest available rate.
 */
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct iwn_node *wn = (void *)ni;
	struct ieee80211_rateset *rs;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, rate, txrate;
	int is_11n;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = wn->id;
	linkq.antmsk_1stream = txant;

	/*
	 * The '2 stream' setup is a bit .. odd.
	 *
	 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
	 * the firmware panics (eg Intel 5100.)
	 *
	 * For NICs that support two antennas, we use ANT_AB.
	 *
	 * For NICs that support three antennas, we use the two that
	 * wasn't the default one.
	 *
	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
	 * this to only one antenna.
	 */

	/* So - if there's no secondary antenna, assume IWN_ANT_AB */

	/* Default - transmit on the other antennas */
	linkq.antmsk_2stream = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));

	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
	if (linkq.antmsk_2stream == 0)
		linkq.antmsk_2stream = IWN_ANT_AB;

	/*
	 * If the NIC is a two-stream TX NIC, configure the TX mask to
	 * the default chainmask
	 */
	else if (sc->ntxchains == 2)
		linkq.antmsk_2stream = sc->txchainmask;

	linkq.ampdu_max = 32;		/* XXX negotiated? */
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
	    __func__,
	    linkq.antmsk_1stream,
	    linkq.antmsk_2stream,
	    sc->ntxchains);

	/*
	 * Are we using 11n rates? Ensure the channel is
	 * 11n _and_ we have some 11n rates, or don't
	 * try.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		is_11n = 1;
	} else {
		rs = &ni->ni_rates;
		is_11n = 0;
	}

	/* Start at highest available bit-rate. */
	/*
	 * XXX this is all very dirty!
	 */
	if (is_11n)
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		uint32_t plcp;

		if (is_11n)
			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
		else
			rate = RV(rs->rs_rates[txrate]);

		DPRINTF(sc, IWN_DEBUG_XMIT,
		    "%s: i=%d, txrate=%d, rate=0x%02x\n",
		    __func__,
		    i,
		    txrate,
		    rate);

		/* Do rate -> PLCP config mapping */
		plcp = iwn_rate_to_plcp(sc, ni, rate);
		linkq.retry[i] = plcp;

		/*
		 * The mimo field is an index into the table which
		 * indicates the first index where it and subsequent entries
		 * will not be using MIMO.
		 *
		 * Since we're filling linkq from 0..15 and we're filling
		 * from the higest MCS rates to the lowest rates, if we
		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
		 * the next entry.)  That way if the next entry is a non-MIMO
		 * entry, we're already pointing at it.
		 */
		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
		    RV(le32toh(plcp)) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
#undef	RV
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		linkq.retry[0] = htole32(0xd);	/* 6Mbps OFDM PLCP */
	else
		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);	/* 1Mbps CCK */
	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
	/* Use same bit-rate for all TX retries. */
	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
		linkq.retry[i] = linkq.retry[0];
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}

/*
 * Push the current net80211 WME/EDCA parameters (AIFSN, CWmin/CWmax,
 * TXOP limit per access category) to the firmware.  Called from
 * net80211 with the com lock held; the lock is dropped around the
 * firmware command.
 */
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	struct iwn_edca_params cmd;
	int aci;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = htole32(IWN_EDCA_UPDATE);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac =
		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
		cmd.ac[aci].aifsn = ac->wmep_aifsn;
		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
		cmd.ac[aci].txoplimit =
		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
	}
	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
	IWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
#undef IWN_EXP2
}

/*
 * Multicast filter update callback — nothing to do for this hardware.
 */
static void
iwn_update_mcast(struct ifnet *ifp)
{
	/* Ignore */
}

/*
 * Program one LED's blink pattern ('off'/'on' durations in units of
 * 100ms) via the SET_LED firmware command, taking LED control away
 * from the microcode first.
 */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct iwn_cmd_led led;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

#if 0
	/* XXX don't set LEDs during scan? */
	if (sc->sc_is_scanning)
		return;
#endif

	/* Clear microcode LED ownership. */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
 */
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
	struct iwn_critical_temp crit;
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);

	/* Threshold is 110 degC, in whatever unit this chip measures. */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		temp = IWN_CTOK(110);
	else
		temp = 110;
	memset(&crit, 0, sizeof crit);
	crit.tempR = htole32(temp);
	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}

/*
 * Program beacon timing (interval, listen interval and time until the
 * next beacon, derived from the node's TSF timestamp) into the firmware.
 */
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon. */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/*
 * Re-run 4965 TX power calibration when the on-die temperature has
 * drifted far enough (>= 3 degC) from the last calibration point.
 */
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Adjust TX power if need be (delta >= 3 degC). */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
	    __func__, sc->temp, temp);
	if (abs(temp - sc->temp) >= 3) {
		/* Record temperature of last calibration. */
		sc->temp = temp;
		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
	}
}

/*
 * Set TX power for current channel (each rate has its own power settings).
 * This function takes into account the regulatory information from EEPROM,
 * the current temperature and the current voltage.
 */
static int
iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
	struct iwn_ucode_info *uc = &sc->ucode_info;
	struct iwn4965_cmd_txpower cmd;
	struct iwn4965_eeprom_chan_samples *chans;
	const uint8_t *rf_gain, *dsp_gain;
	int32_t vdiff, tdiff;
	int i, c, grp, maxpwr;
	uint8_t chan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Retrieve current channel from last RXON. */
	chan = sc->rxon->chan;
	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
	    chan);

	memset(&cmd, 0, sizeof cmd);
	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;	/* 0 = 5GHz */
	cmd.chan = chan;

	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
		maxpwr   = sc->maxpwr5GHz;
		rf_gain  = iwn4965_rf_gain_5ghz;
		dsp_gain = iwn4965_dsp_gain_5ghz;
	} else {
		maxpwr   = sc->maxpwr2GHz;
		rf_gain  = iwn4965_rf_gain_2ghz;
		dsp_gain = iwn4965_dsp_gain_2ghz;
	}

	/* Compute voltage compensation. */
	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
	if (vdiff > 0)
		vdiff *= 2;
	if (abs(vdiff) > 2)
		vdiff = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);

	/* Get channel attenuation group. */
	if (chan <= 20)		/* 1-20 */
		grp = 4;
	else if (chan <= 43)	/* 34-43 */
		grp = 0;
	else if (chan <= 70)	/* 44-70 */
		grp = 1;
	else if (chan <= 124)	/* 71-124 */
		grp = 2;
	else			/* 125-200 */
		grp = 3;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);

	/* Get channel sub-band. */
	for (i = 0; i < IWN_NBANDS; i++)
		if (sc->bands[i].lo != 0 &&
		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
			break;
	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
		return EINVAL;
	chans = sc->bands[i].chans;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d sub-band=%d\n", __func__, chan, i);

	/* Interpolate EEPROM samples and fill gains per TX chain. */
	for (c = 0; c < 2; c++) {
		uint8_t power, gain, temp;
		int maxchpwr, pwr, ridx, idx;

		power = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].power,
		    chans[1].num, chans[1].samples[c][1].power, 1);
		gain  = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].gain,
		    chans[1].num, chans[1].samples[c][1].gain, 1);
		temp  = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].temp,
		    chans[1].num, chans[1].samples[c][1].temp, 1);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
		    __func__, c, power, gain, temp);

		/* Compute temperature compensation. */
		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
		    __func__, tdiff, sc->temp, temp);

		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			/* Convert dBm to half-dBm. */
			maxchpwr = sc->maxpwr[chan] * 2;
			if ((ridx / 8) & 1)
				maxchpwr -= 6;	/* MIMO 2T: -3dB */

			pwr = maxpwr;

			/* Adjust TX power based on rate. */
			if ((ridx % 8) == 5)
				pwr -= 15;	/* OFDM48: -7.5dB */
			else if ((ridx % 8) == 6)
				pwr -= 17;	/* OFDM54: -8.5dB */
			else if ((ridx % 8) == 7)
				pwr -= 20;	/* OFDM60: -10dB */
			else
				pwr -= 10;	/* Others: -5dB */

			/* Do not exceed channel max TX power. */
			if (pwr > maxchpwr)
				pwr = maxchpwr;

			idx = gain - (pwr - power) - tdiff - vdiff;
			if ((ridx / 8) & 1)	/* MIMO */
				idx += (int32_t)le32toh(uc->atten[grp][c]);

			if (cmd.band == 0)
				idx += 9;	/* 5GHz */
			if (ridx == IWN_RIDX_MAX)
				idx += 5;	/* CCK */

			/* Make sure idx stays in a valid range. */
			if (idx < 0)
				idx = 0;
			else if (idx > IWN4965_MAX_PWR_INDEX)
				idx = IWN4965_MAX_PWR_INDEX;

			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
			    "%s: Tx chain %d, rate idx %d: power=%d\n",
			    __func__, c, ridx, idx);
			cmd.power[ridx].rf_gain[c]  = rf_gain[idx];
			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
		}
	}

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: set tx power for chan %d\n", __func__, chan);
	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);

#undef interpolate
#undef fdivround
}

static int
iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
	struct iwn5000_cmd_txpower cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * TX power calibration is handled automatically by the firmware
	 * for 5000 Series.
	 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Antenna selection mask and AGC value from the PHY statistics. */
	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc = (le16toh(phy->agc) >> 7) & 0x7f;

	/* Take the strongest RSSI among the connected antennas. */
	rssi = 0;
	if (mask & IWN_ANT_A)
		rssi = MAX(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)
		rssi = MAX(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)
		rssi = MAX(rssi, phy->rssi[4]);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers, 5000 Series
 * PHY statistics layout.
 */
static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	/* Maximum of the three per-antenna RSSI bytes. */
	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
	    le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		/* Skip antennas reporting no noise sample. */
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* R1-R3 are firmware-provided calibration references. */
	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
	/* Compute temperature in Kelvin. */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.
	 */
	temp = le32toh(sc->rawtemp);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* 5150 reports a raw value needing offset correction. */
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
 */
static int
iwn_init_sensitivity(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t flags;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Reset calibration state machine. */
	memset(calib, 0, sizeof (*calib));
	calib->state = IWN_CALIB_STATE_INIT;
	calib->cck_state = IWN_CCK_STATE_HIFA;
	/* Set initial correlation values.
 */
	calib->ofdm_x1 = sc->limits->min_ofdm_x1;
	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
	calib->ofdm_x4 = sc->limits->min_ofdm_x4;
	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
	calib->cck_x4 = 125;
	calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
	calib->energy_cck = sc->limits->energy_cck;

	/* Write initial sensitivity. */
	if ((error = iwn_send_sensitivity(sc)) != 0)
		return error;

	/* Write initial gains. */
	if ((error = ops->init_gains(sc)) != 0)
		return error;

	/* Request statistics at each beacon interval. */
	flags = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}

/*
 * Collect noise and RSSI statistics for the first 20 beacons received
 * after association and use them to determine connected antennas and
 * to set differential gains.
 */
static void
iwn_collect_noise(struct iwn_softc *sc,
    const struct iwn_rx_general_stats *stats)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Accumulate RSSI and noise for all 3 antennas. */
	for (i = 0; i < 3; i++) {
		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
	}
	/* NB: We update differential gains only once after 20 beacons. */
	if (++calib->nbeacons < 20)
		return;

	/* Determine highest average RSSI. */
	val = MAX(calib->rssi[0], calib->rssi[1]);
	val = MAX(calib->rssi[2], val);

	/*
	 * Determine which antennas are connected: an antenna whose
	 * accumulated RSSI is more than 15 units/beacon (over 20 beacons)
	 * below the best one is considered disconnected.
	 */
	sc->chainmask = sc->rxchainmask;
	for (i = 0; i < 3; i++)
		if (val - calib->rssi[i] > 15 * 20)
			sc->chainmask &= ~(1 << i);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
	    __func__, sc->rxchainmask, sc->chainmask);

	/* If none of the TX antennas are connected, keep at least one. */
	if ((sc->chainmask & sc->txchainmask) == 0)
		sc->chainmask |= IWN_LSB(sc->txchainmask);

	(void)ops->set_gains(sc);
	calib->state = IWN_CALIB_STATE_RUN;

#ifdef notyet
	/* XXX Disable RX chains with no antennas connected. */
	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
#endif

	/* Enable power-saving mode if requested by user. */
	if (ic->ic_flags & IEEE80211_F_PMGTON)
		(void)iwn_set_pslevel(sc, 0, 3, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Send initial (all-zero) differential gains for the 4965.
 */
static int
iwn4965_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib_gain cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Differential gains initially set to 0 for all 3 antennas.
 */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Send initial noise-gain calibration command for 5000 Series and newer.
 */
static int
iwn5000_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->reset_noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Set 4965 differential gains from the noise collected over 20 beacons;
 * each connected antenna is attenuated relative to the quietest one.
 */
static int
iwn4965_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, delta, noise;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Get minimal noise among connected antennas. */
	noise = INT_MAX;	/* NB: There's at least one antenna. */
	for (i = 0; i < 3; i++)
		if (sc->chainmask & (1 << i))
			noise = MIN(calib->noise[i], noise);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Set differential gains for connected antennas. */
	for (i = 0; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* Compute attenuation (in unit of 1.5dB). */
			delta = (noise - (int32_t)calib->noise[i]) / 30;
			/* NB: delta <= 0 */
			/* Limit to [-4.5dB,0]. */
			cmd.gain[i] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Set differential gains on 5000 Series and newer, relative to the
 * first available RX antenna.
 */
static int
iwn5000_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, ant, div, delta;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Get first available RX antenna as referential. */
	ant = IWN_LSB(sc->rxchainmask);
	/* Set differential gains for other antennas. */
	for (i = ant + 1; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* The delta is relative to antenna "ant". */
			delta = ((int32_t)calib->noise[ant] -
			    (int32_t)calib->noise[i]) / div;
			/* Limit to [-4.5dB,+4.5dB]. */
			cmd.gain[i - 1] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant B/C: %x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Tune RF RX sensitivity based on the number of false alarms detected
 * during the last beacon period.
 */
static void
iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
{
/*
 * NB: These macros evaluate their arguments multiple times and set
 * needs_update as a side effect; only pass simple lvalues/constants.
 */
#define inc(val, inc, max)			\
	if ((val) < (max)) {			\
		if ((val) < (max) - (inc))	\
			(val) += (inc);		\
		else				\
			(val) = (max);		\
		needs_update = 1;		\
	}
#define dec(val, dec, min)			\
	if ((val) > (min)) {			\
		if ((val) > (min) + (dec))	\
			(val) -= (dec);		\
		else				\
			(val) = (min);		\
		needs_update = 1;		\
	}

	const struct iwn_sensitivity_limits *limits = sc->limits;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t val, rxena, fa;
	uint32_t energy[3], energy_min;
	uint8_t noise[3], noise_ref;
	int i, needs_update = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check that we've been enabled long enough. */
	if ((rxena = le32toh(stats->general.load)) == 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
		return;
	}

	/* Compute number of false alarms since last call for OFDM. */
	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
	}

	/* Compute maximum noise among 3 receivers. */
	for (i = 0; i < 3; i++)
		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
	val = MAX(noise[0], noise[1]);
	val = MAX(noise[2], val);
	/* Insert it into our samples table. */
	calib->noise_samples[calib->cur_noise_sample] = val;
	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;

	/* Compute maximum noise among last 20 samples. */
	noise_ref = calib->noise_samples[0];
	for (i = 1; i < 20; i++)
		noise_ref = MAX(noise_ref, calib->noise_samples[i]);

	/* Compute maximum energy among 3 receivers. */
	for (i = 0; i < 3; i++)
		energy[i] = le32toh(stats->general.energy[i]);
	val = MIN(energy[0], energy[1]);
	val = MIN(energy[2], val);
	/* Insert it into our samples table. */
	calib->energy_samples[calib->cur_energy_sample] = val;
	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;

	/*
	 * Compute minimum energy among last 10 samples.
	 * NOTE(review): MAX is used here although the comment says
	 * "minimum" — the energy values appear to be inverted (larger
	 * raw value = weaker energy), mirroring Linux iwlwifi; confirm
	 * before "fixing".
	 */
	energy_min = calib->energy_samples[0];
	for (i = 1; i < 10; i++)
		energy_min = MAX(energy_min, calib->energy_samples[i]);
	energy_min += 6;

	/* Compute number of false alarms since last call for CCK. */
	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
	fa += le32toh(stats->cck.fa) - calib->fa_cck;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK high false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_HIFA;
		calib->low_fa = 0;

		if (calib->cck_x4 > 160) {
			calib->noise_ref = noise_ref;
			if (calib->energy_cck > 2)
				dec(calib->energy_cck, 2, energy_min);
		}
		if (calib->cck_x4 < 160) {
			calib->cck_x4 = 161;
			needs_update = 1;
		} else
			inc(calib->cck_x4, 3, limits->max_cck_x4);

		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK low false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_LOFA;
		calib->low_fa++;

		if (calib->cck_state != IWN_CCK_STATE_INIT &&
		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
		     calib->low_fa > 100)) {
			inc(calib->energy_cck, 2, limits->min_energy_cck);
			dec(calib->cck_x4,     3, limits->min_cck_x4);
			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
		}
	} else {
		/* Not worth to increase or decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
		calib->low_fa = 0;
		calib->noise_ref = noise_ref;

		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
			/* Previous interval had many false alarms. */
			dec(calib->energy_cck, 8, energy_min);
		}
		calib->cck_state = IWN_CCK_STATE_INIT;
	}

	if (needs_update)
		(void)iwn_send_sensitivity(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#undef dec
#undef inc
}

/*
 * Push the current sensitivity (correlation) values to the firmware.
 * Uses the basic command layout unless enhanced sensitivity is supported.
 */
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_enhanced_sensitivity_cmd cmd;
	int len;

	memset(&cmd, 0, sizeof cmd);
	/* Default to the basic (non-enhanced) command size. */
	len = sizeof (struct iwn_sensitivity_cmd);
	cmd.which = IWN_SENSITIVITY_WORKTBL;
	/* OFDM modulation. */
	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
	cmd.energy_ofdm_th     = htole16(62);
	/* CCK modulation. */
	cmd.corr_cck_x4        = htole16(calib->cck_x4);
	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
	cmd.energy_cck         = htole16(calib->energy_cck);
	/* Barker modulation: use default values. */
	cmd.corr_barker        = htole16(190);
	cmd.corr_barker_mrc    = htole16(sc->limits->barker_mrc);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
	    calib->ofdm_mrc_x4, calib->cck_x4,
	    calib->cck_mrc_x4, calib->energy_cck);

	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
		goto send;
	/* Enhanced sensitivity settings.
*/ 5976 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 5977 cmd.ofdm_det_slope_mrc = htole16(668); 5978 cmd.ofdm_det_icept_mrc = htole16(4); 5979 cmd.ofdm_det_slope = htole16(486); 5980 cmd.ofdm_det_icept = htole16(37); 5981 cmd.cck_det_slope_mrc = htole16(853); 5982 cmd.cck_det_icept_mrc = htole16(4); 5983 cmd.cck_det_slope = htole16(476); 5984 cmd.cck_det_icept = htole16(99); 5985 send: 5986 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 5987 } 5988 5989 /* 5990 * Look at the increase of PLCP errors over time; if it exceeds 5991 * a programmed threshold then trigger an RF retune. 5992 */ 5993 static void 5994 iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs) 5995 { 5996 int32_t delta_ofdm, delta_ht, delta_cck; 5997 struct iwn_calib_state *calib = &sc->calib; 5998 int delta_ticks, cur_ticks; 5999 int delta_msec; 6000 int thresh; 6001 6002 /* 6003 * Calculate the difference between the current and 6004 * previous statistics. 6005 */ 6006 delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck; 6007 delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm; 6008 delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht; 6009 6010 /* 6011 * Calculate the delta in time between successive statistics 6012 * messages. Yes, it can roll over; so we make sure that 6013 * this doesn't happen. 6014 * 6015 * XXX go figure out what to do about rollover 6016 * XXX go figure out what to do if ticks rolls over to -ve instead! 6017 * XXX go stab signed integer overflow undefined-ness in the face. 6018 */ 6019 cur_ticks = ticks; 6020 delta_ticks = cur_ticks - sc->last_calib_ticks; 6021 6022 /* 6023 * If any are negative, then the firmware likely reset; so just 6024 * bail. We'll pick this up next time. 6025 */ 6026 if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0) 6027 return; 6028 6029 /* 6030 * delta_ticks is in ticks; we need to convert it up to milliseconds 6031 * so we can do some useful math with it. 
6032 */ 6033 delta_msec = ticks_to_msecs(delta_ticks); 6034 6035 /* 6036 * Calculate what our threshold is given the current delta_msec. 6037 */ 6038 thresh = sc->base_params->plcp_err_threshold * delta_msec; 6039 6040 DPRINTF(sc, IWN_DEBUG_STATE, 6041 "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n", 6042 __func__, 6043 delta_msec, 6044 delta_cck, 6045 delta_ofdm, 6046 delta_ht, 6047 (delta_msec + delta_cck + delta_ofdm + delta_ht), 6048 thresh); 6049 6050 /* 6051 * If we need a retune, then schedule a single channel scan 6052 * to a channel that isn't the currently active one! 6053 * 6054 * The math from linux iwlwifi: 6055 * 6056 * if ((delta * 100 / msecs) > threshold) 6057 */ 6058 if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) { 6059 DPRINTF(sc, IWN_DEBUG_ANY, 6060 "%s: PLCP error threshold raw (%d) comparison (%d) " 6061 "over limit (%d); retune!\n", 6062 __func__, 6063 (delta_cck + delta_ofdm + delta_ht), 6064 (delta_cck + delta_ofdm + delta_ht) * 100, 6065 thresh); 6066 } 6067 } 6068 6069 /* 6070 * Set STA mode power saving level (between 0 and 5). 6071 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 6072 */ 6073 static int 6074 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 6075 { 6076 struct iwn_pmgt_cmd cmd; 6077 const struct iwn_pmgt *pmgt; 6078 uint32_t max, skip_dtim; 6079 uint32_t reg; 6080 int i; 6081 6082 DPRINTF(sc, IWN_DEBUG_PWRSAVE, 6083 "%s: dtim=%d, level=%d, async=%d\n", 6084 __func__, 6085 dtim, 6086 level, 6087 async); 6088 6089 /* Select which PS parameters to use. 
 */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	if (!(reg & 0x1))	/* L0s Entry disabled. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);

	if (dtim == 0) {
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;
	if (skip_dtim != 0) {
		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			max = (max / dtim) * dtim;
	} else
		max = dtim;
	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
	    level);
	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/*
 * Configure basic (legacy) bluetooth coexistence.
 */
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
	struct iwn_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}

/*
 * Configure advanced (3-wire) bluetooth coexistence: send the config
 * command (6000- or 2000-series layout), the priority table, and then
 * toggle the protection command to force a BT state machine change.
 */
static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
	static const uint32_t btcoex_3wire[12] = {
		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
	};
	struct iwn6000_btcoex_config btconfig;
	struct iwn2000_btcoex_config btconfig2k;
	struct iwn_btcoex_priotable btprio;
	struct iwn_btcoex_prot btprot;
	int error, i;
	uint8_t flags;

	memset(&btconfig, 0, sizeof btconfig);
	memset(&btconfig2k, 0, sizeof btconfig2k);

	flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
	    IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2

	if (sc->base_params->bt_sco_disable)
		flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
	else
		flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;

	flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;

	/* Default flags result is 145 as old value */

	/*
	 * Flags value has to be review. Values must change if we
	 * which to disable it
	 */
	if (sc->base_params->bt_session_2) {
		/* 2000-series layout. */
		btconfig2k.flags = flags;
		btconfig2k.max_kill = 5;
		btconfig2k.bt3_t7_timer = 1;
		btconfig2k.kill_ack = htole32(0xffff0000);
		btconfig2k.kill_cts = htole32(0xffff0000);
		btconfig2k.sample_time = 2;
		btconfig2k.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig2k.valid = htole16(0xff);
		btconfig2k.prio_boost = htole32(0xf0);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence"
		    " session 2, flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
		    sizeof(btconfig2k), 1);
	} else {
		/* 6000-series layout. */
		btconfig.flags = flags;
		btconfig.max_kill = 5;
		btconfig.bt3_t7_timer = 1;
		btconfig.kill_ack = htole32(0xffff0000);
		btconfig.kill_cts = htole32(0xffff0000);
		btconfig.sample_time = 2;
		btconfig.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig.valid = htole16(0xff);
		/*
		 * NOTE(review): no htole32() here, unlike the session-2
		 * path above — presumably this prio_boost field is 8-bit
		 * in the 6000 layout; confirm against if_iwnreg.h.
		 */
		btconfig.prio_boost = 0xf0;
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence,"
		    " flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
		    sizeof(btconfig), 1);
	}

	if (error != 0)
		return error;

	memset(&btprio, 0, sizeof btprio);
	btprio.calib_init1 = 0x6;
	btprio.calib_init2 = 0x7;
	btprio.calib_periodic_low1 = 0x2;
	btprio.calib_periodic_low2 = 0x3;
	btprio.calib_periodic_high1 = 0x4;
	btprio.calib_periodic_high2 = 0x5;
	btprio.dtim = 0x6;
	btprio.scan52 = 0x8;
	btprio.scan24 = 0xa;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
	    1);
	if (error != 0)
		return error;

	/* Force BT state machine change. */
	memset(&btprot, 0, sizeof btprot);
	btprot.open = 1;
	btprot.type = 1;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
	if (error != 0)
		return error;
	btprot.open = 0;
	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
}

/*
 * Enable runtime DC calibration (6050 only).
 */
static int
iwn5000_runtime_calib(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start = IWN5000_CALIB_DC;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: configuring runtime calibration\n", __func__);
	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
}

/*
 * Send the initial device configuration: temperature calibration,
 * valid TX chains, bluetooth coexistence, RXON, broadcast node,
 * TX power, critical temperature and power-saving level.
 */
static int
iwn_config(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t txmask;
	uint16_t rxchain;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* The two temperature-offset calibration flavors are exclusive. */
	if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
	    && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
		device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
		    " exclusive each together. Review NIC config file. Conf"
		    " : 0x%08x Flags : 0x%08x \n", __func__,
		    sc->base_params->calib_need,
		    (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
		    IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
		return (EINVAL);
	}

	/* Compute temperature calib if needed. Will be send by send calib */
	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return (error);
		}
	} else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		error = iwn5000_temp_offset_calibv2(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not compute temperature offset v2\n",
			    __func__);
			return (error);
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	error = 0;

	/* Configure bluetooth coexistence if needed. */
	if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
		error = iwn_send_advanced_btcoex(sc);
	if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
		error = iwn_send_btcoex(sc);

	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX. */
	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
	IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon->mode = IWN_MODE_STA;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon->mode = IWN_MODE_MONITOR;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
		break;
	default:
		/* Should not get there. */
		break;
	}
	sc->rxon->cck_mask  = 0x0f;	/* not yet negotiated */
	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon->ht_single_mask = 0xff;
	sc->rxon->ht_dual_mask = 0xff;
	sc->rxon->ht_triple_mask = 0xff;
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(2) |
	    IWN_RXCHAIN_IDLE_COUNT(2);
	sc->rxon->rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed\n",
		    __func__);
		return error;
	}

	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
		    __func__);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not set TX power\n",
		    __func__);
		return error;
	}

	if ((error = iwn_set_critical_temp(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set critical temperature\n", __func__);
		return error;
	}

	/* Set power saving level to CAM during initialization. */
	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Add an ssid element to a frame.
6424 */ 6425 static uint8_t * 6426 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) 6427 { 6428 *frm++ = IEEE80211_ELEMID_SSID; 6429 *frm++ = len; 6430 memcpy(frm, ssid, len); 6431 return frm + len; 6432 } 6433 6434 static uint16_t 6435 iwn_get_active_dwell_time(struct iwn_softc *sc, 6436 struct ieee80211_channel *c, uint8_t n_probes) 6437 { 6438 /* No channel? Default to 2GHz settings */ 6439 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 6440 return (IWN_ACTIVE_DWELL_TIME_2GHZ + 6441 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 6442 } 6443 6444 /* 5GHz dwell time */ 6445 return (IWN_ACTIVE_DWELL_TIME_5GHZ + 6446 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 6447 } 6448 6449 /* 6450 * Limit the total dwell time to 85% of the beacon interval. 6451 * 6452 * Returns the dwell time in milliseconds. 6453 */ 6454 static uint16_t 6455 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 6456 { 6457 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 6458 struct ieee80211vap *vap = NULL; 6459 int bintval = 0; 6460 6461 /* bintval is in TU (1.024mS) */ 6462 if (! TAILQ_EMPTY(&ic->ic_vaps)) { 6463 vap = TAILQ_FIRST(&ic->ic_vaps); 6464 bintval = vap->iv_bss->ni_intval; 6465 } 6466 6467 /* 6468 * If it's non-zero, we should calculate the minimum of 6469 * it and the DWELL_BASE. 6470 * 6471 * XXX Yes, the math should take into account that bintval 6472 * is 1.024mS, not 1mS.. 6473 */ 6474 if (bintval > 0) { 6475 DPRINTF(sc, IWN_DEBUG_SCAN, 6476 "%s: bintval=%d\n", 6477 __func__, 6478 bintval); 6479 return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); 6480 } 6481 6482 /* No association context? 
Default */ 6483 return (IWN_PASSIVE_DWELL_BASE); 6484 } 6485 6486 static uint16_t 6487 iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c) 6488 { 6489 uint16_t passive; 6490 6491 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 6492 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 6493 } else { 6494 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 6495 } 6496 6497 /* Clamp to the beacon interval if we're associated */ 6498 return (iwn_limit_dwell(sc, passive)); 6499 } 6500 6501 static int 6502 iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap, 6503 struct ieee80211_scan_state *ss, struct ieee80211_channel *c) 6504 { 6505 struct ifnet *ifp = sc->sc_ifp; 6506 struct ieee80211com *ic = ifp->if_l2com; 6507 struct ieee80211_node *ni = vap->iv_bss; 6508 struct iwn_scan_hdr *hdr; 6509 struct iwn_cmd_data *tx; 6510 struct iwn_scan_essid *essid; 6511 struct iwn_scan_chan *chan; 6512 struct ieee80211_frame *wh; 6513 struct ieee80211_rateset *rs; 6514 uint8_t *buf, *frm; 6515 uint16_t rxchain; 6516 uint8_t txant; 6517 int buflen, error; 6518 int is_active; 6519 uint16_t dwell_active, dwell_passive; 6520 uint32_t extra, scan_service_time; 6521 6522 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6523 6524 /* 6525 * We are absolutely not allowed to send a scan command when another 6526 * scan command is pending. 
6527 */ 6528 if (sc->sc_is_scanning) { 6529 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 6530 __func__); 6531 return (EAGAIN); 6532 } 6533 6534 /* Assign the scan channel */ 6535 c = ic->ic_curchan; 6536 6537 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6538 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 6539 if (buf == NULL) { 6540 device_printf(sc->sc_dev, 6541 "%s: could not allocate buffer for scan command\n", 6542 __func__); 6543 return ENOMEM; 6544 } 6545 hdr = (struct iwn_scan_hdr *)buf; 6546 /* 6547 * Move to the next channel if no frames are received within 10ms 6548 * after sending the probe request. 6549 */ 6550 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 6551 hdr->quiet_threshold = htole16(1); /* min # of packets */ 6552 /* 6553 * Max needs to be greater than active and passive and quiet! 6554 * It's also in microseconds! 6555 */ 6556 hdr->max_svc = htole32(250 * 1024); 6557 6558 /* 6559 * Reset scan: interval=100 6560 * Normal scan: interval=becaon interval 6561 * suspend_time: 100 (TU) 6562 * 6563 */ 6564 extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22; 6565 //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024); 6566 scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */ 6567 hdr->pause_svc = htole32(scan_service_time); 6568 6569 /* Select antennas for scanning. */ 6570 rxchain = 6571 IWN_RXCHAIN_VALID(sc->rxchainmask) | 6572 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 6573 IWN_RXCHAIN_DRIVER_FORCE; 6574 if (IEEE80211_IS_CHAN_A(c) && 6575 sc->hw_type == IWN_HW_REV_TYPE_4965) { 6576 /* Ant A must be avoided in 5GHz because of an HW bug. */ 6577 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); 6578 } else /* Use all available RX antennas. 
*/ 6579 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 6580 hdr->rxchain = htole16(rxchain); 6581 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 6582 6583 tx = (struct iwn_cmd_data *)(hdr + 1); 6584 tx->flags = htole32(IWN_TX_AUTO_SEQ); 6585 tx->id = sc->broadcast_id; 6586 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 6587 6588 if (IEEE80211_IS_CHAN_5GHZ(c)) { 6589 /* Send probe requests at 6Mbps. */ 6590 tx->rate = htole32(0xd); 6591 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 6592 } else { 6593 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 6594 if (sc->hw_type == IWN_HW_REV_TYPE_4965 && 6595 sc->rxon->associd && sc->rxon->chan > 14) 6596 tx->rate = htole32(0xd); 6597 else { 6598 /* Send probe requests at 1Mbps. */ 6599 tx->rate = htole32(10 | IWN_RFLAG_CCK); 6600 } 6601 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 6602 } 6603 /* Use the first valid TX antenna. */ 6604 txant = IWN_LSB(sc->txchainmask); 6605 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 6606 6607 /* 6608 * Only do active scanning if we're announcing a probe request 6609 * for a given SSID (or more, if we ever add it to the driver.) 6610 */ 6611 is_active = 0; 6612 6613 /* 6614 * If we're scanning for a specific SSID, add it to the command. 6615 * 6616 * XXX maybe look at adding support for scanning multiple SSIDs? 6617 */ 6618 essid = (struct iwn_scan_essid *)(tx + 1); 6619 if (ss != NULL) { 6620 if (ss->ss_ssid[0].len != 0) { 6621 essid[0].id = IEEE80211_ELEMID_SSID; 6622 essid[0].len = ss->ss_ssid[0].len; 6623 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 6624 } 6625 6626 DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n", 6627 __func__, 6628 ss->ss_ssid[0].len, 6629 ss->ss_ssid[0].len, 6630 ss->ss_ssid[0].ssid); 6631 6632 if (ss->ss_nssid > 0) 6633 is_active = 1; 6634 } 6635 6636 /* 6637 * Build a probe request frame. Most of the following code is a 6638 * copy & paste of what is done in net80211. 
6639 */ 6640 wh = (struct ieee80211_frame *)(essid + 20); 6641 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 6642 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 6643 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 6644 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 6645 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 6646 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 6647 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 6648 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 6649 6650 frm = (uint8_t *)(wh + 1); 6651 frm = ieee80211_add_ssid(frm, NULL, 0); 6652 frm = ieee80211_add_rates(frm, rs); 6653 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6654 frm = ieee80211_add_xrates(frm, rs); 6655 if (ic->ic_htcaps & IEEE80211_HTC_HT) 6656 frm = ieee80211_add_htcap(frm, ni); 6657 6658 /* Set length of probe request. */ 6659 tx->len = htole16(frm - (uint8_t *)wh); 6660 6661 /* 6662 * If active scanning is requested but a certain channel is 6663 * marked passive, we can do active scanning if we detect 6664 * transmissions. 6665 * 6666 * There is an issue with some firmware versions that triggers 6667 * a sysassert on a "good CRC threshold" of zero (== disabled), 6668 * on a radar channel even though this means that we should NOT 6669 * send probes. 6670 * 6671 * The "good CRC threshold" is the number of frames that we 6672 * need to receive during our dwell time on a channel before 6673 * sending out probes -- setting this to a huge value will 6674 * mean we never reach it, but at the same time work around 6675 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER 6676 * here instead of IWL_GOOD_CRC_TH_DISABLED. 6677 * 6678 * This was fixed in later versions along with some other 6679 * scan changes, and the threshold behaves as a flag in those 6680 * versions. 6681 */ 6682 6683 /* 6684 * If we're doing active scanning, set the crc_threshold 6685 * to a suitable value. 
This is different to active veruss 6686 * passive scanning depending upon the channel flags; the 6687 * firmware will obey that particular check for us. 6688 */ 6689 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 6690 hdr->crc_threshold = is_active ? 6691 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 6692 else 6693 hdr->crc_threshold = is_active ? 6694 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 6695 6696 chan = (struct iwn_scan_chan *)frm; 6697 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 6698 chan->flags = 0; 6699 if (ss->ss_nssid > 0) 6700 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 6701 chan->dsp_gain = 0x6e; 6702 6703 /* 6704 * Set the passive/active flag depending upon the channel mode. 6705 * XXX TODO: take the is_active flag into account as well? 6706 */ 6707 if (c->ic_flags & IEEE80211_CHAN_PASSIVE) 6708 chan->flags |= htole32(IWN_CHAN_PASSIVE); 6709 else 6710 chan->flags |= htole32(IWN_CHAN_ACTIVE); 6711 6712 /* 6713 * Calculate the active/passive dwell times. 
6714 */ 6715 6716 dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid); 6717 dwell_passive = iwn_get_passive_dwell_time(sc, c); 6718 6719 /* Make sure they're valid */ 6720 if (dwell_passive <= dwell_active) 6721 dwell_passive = dwell_active + 1; 6722 6723 chan->active = htole16(dwell_active); 6724 chan->passive = htole16(dwell_passive); 6725 6726 if (IEEE80211_IS_CHAN_5GHZ(c) && 6727 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6728 chan->rf_gain = 0x3b; 6729 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 6730 chan->rf_gain = 0x3b; 6731 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6732 chan->rf_gain = 0x28; 6733 } else { 6734 chan->rf_gain = 0x28; 6735 } 6736 6737 DPRINTF(sc, IWN_DEBUG_STATE, 6738 "%s: chan %u flags 0x%x rf_gain 0x%x " 6739 "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x " 6740 "isactive=%d numssid=%d\n", __func__, 6741 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 6742 dwell_active, dwell_passive, scan_service_time, 6743 hdr->crc_threshold, is_active, ss->ss_nssid); 6744 6745 hdr->nchan++; 6746 chan++; 6747 buflen = (uint8_t *)chan - buf; 6748 hdr->len = htole16(buflen); 6749 6750 if (sc->sc_is_scanning) { 6751 device_printf(sc->sc_dev, 6752 "%s: called with is_scanning set!\n", 6753 __func__); 6754 } 6755 sc->sc_is_scanning = 1; 6756 6757 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 6758 hdr->nchan); 6759 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 6760 free(buf, M_DEVBUF); 6761 6762 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6763 6764 return error; 6765 } 6766 6767 static int 6768 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 6769 { 6770 struct iwn_ops *ops = &sc->ops; 6771 struct ifnet *ifp = sc->sc_ifp; 6772 struct ieee80211com *ic = ifp->if_l2com; 6773 struct ieee80211_node *ni = vap->iv_bss; 6774 int error; 6775 6776 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6777 6778 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6779 /* Update adapter configuration. 
*/ 6780 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); 6781 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); 6782 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6783 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 6784 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6785 if (ic->ic_flags & IEEE80211_F_SHSLOT) 6786 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); 6787 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6788 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); 6789 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 6790 sc->rxon->cck_mask = 0; 6791 sc->rxon->ofdm_mask = 0x15; 6792 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 6793 sc->rxon->cck_mask = 0x03; 6794 sc->rxon->ofdm_mask = 0; 6795 } else { 6796 /* Assume 802.11b/g. */ 6797 sc->rxon->cck_mask = 0x03; 6798 sc->rxon->ofdm_mask = 0x15; 6799 } 6800 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 6801 sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask, 6802 sc->rxon->ofdm_mask); 6803 if (sc->sc_is_scanning) 6804 device_printf(sc->sc_dev, 6805 "%s: is_scanning set, before RXON\n", 6806 __func__); 6807 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 6808 if (error != 0) { 6809 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n", 6810 __func__, error); 6811 return error; 6812 } 6813 6814 /* Configuration has changed, set TX power accordingly. */ 6815 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 6816 device_printf(sc->sc_dev, 6817 "%s: could not set TX power, error %d\n", __func__, error); 6818 return error; 6819 } 6820 /* 6821 * Reconfiguring RXON clears the firmware nodes table so we must 6822 * add the broadcast node again. 
6823 */ 6824 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 6825 device_printf(sc->sc_dev, 6826 "%s: could not add broadcast node, error %d\n", __func__, 6827 error); 6828 return error; 6829 } 6830 6831 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6832 6833 return 0; 6834 } 6835 6836 static int 6837 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 6838 { 6839 struct iwn_ops *ops = &sc->ops; 6840 struct ifnet *ifp = sc->sc_ifp; 6841 struct ieee80211com *ic = ifp->if_l2com; 6842 struct ieee80211_node *ni = vap->iv_bss; 6843 struct iwn_node_info node; 6844 uint32_t htflags = 0; 6845 int error; 6846 6847 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6848 6849 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6850 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6851 /* Link LED blinks while monitoring. */ 6852 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 6853 return 0; 6854 } 6855 if ((error = iwn_set_timing(sc, ni)) != 0) { 6856 device_printf(sc->sc_dev, 6857 "%s: could not set timing, error %d\n", __func__, error); 6858 return error; 6859 } 6860 6861 /* Update adapter configuration. */ 6862 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); 6863 sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd)); 6864 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); 6865 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6866 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 6867 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6868 if (ic->ic_flags & IEEE80211_F_SHSLOT) 6869 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); 6870 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6871 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); 6872 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 6873 sc->rxon->cck_mask = 0; 6874 sc->rxon->ofdm_mask = 0x15; 6875 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 6876 sc->rxon->cck_mask = 0x03; 6877 sc->rxon->ofdm_mask = 0; 6878 } else { 6879 /* Assume 802.11b/g. 
*/ 6880 sc->rxon->cck_mask = 0x0f; 6881 sc->rxon->ofdm_mask = 0x15; 6882 } 6883 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 6884 htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode); 6885 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 6886 switch (ic->ic_curhtprotmode) { 6887 case IEEE80211_HTINFO_OPMODE_HT20PR: 6888 htflags |= IWN_RXON_HT_MODEPURE40; 6889 break; 6890 default: 6891 htflags |= IWN_RXON_HT_MODEMIXED; 6892 break; 6893 } 6894 } 6895 if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 6896 htflags |= IWN_RXON_HT_HT40MINUS; 6897 } 6898 sc->rxon->flags |= htole32(htflags); 6899 sc->rxon->filter |= htole32(IWN_FILTER_BSS); 6900 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n", 6901 sc->rxon->chan, sc->rxon->flags); 6902 if (sc->sc_is_scanning) 6903 device_printf(sc->sc_dev, 6904 "%s: is_scanning set, before RXON\n", 6905 __func__); 6906 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 6907 if (error != 0) { 6908 device_printf(sc->sc_dev, 6909 "%s: could not update configuration, error %d\n", __func__, 6910 error); 6911 return error; 6912 } 6913 6914 /* Configuration has changed, set TX power accordingly. */ 6915 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 6916 device_printf(sc->sc_dev, 6917 "%s: could not set TX power, error %d\n", __func__, error); 6918 return error; 6919 } 6920 6921 /* Fake a join to initialize the TX rate. */ 6922 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 6923 iwn_newassoc(ni, 1); 6924 6925 /* Add BSS node. 
*/ 6926 memset(&node, 0, sizeof node); 6927 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 6928 node.id = IWN_ID_BSS; 6929 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 6930 switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) { 6931 case IEEE80211_HTCAP_SMPS_ENA: 6932 node.htflags |= htole32(IWN_SMPS_MIMO_DIS); 6933 break; 6934 case IEEE80211_HTCAP_SMPS_DYNAMIC: 6935 node.htflags |= htole32(IWN_SMPS_MIMO_PROT); 6936 break; 6937 } 6938 node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) | 6939 IWN_AMDPU_DENSITY(5)); /* 4us */ 6940 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) 6941 node.htflags |= htole32(IWN_NODE_HT40); 6942 } 6943 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__); 6944 error = ops->add_node(sc, &node, 1); 6945 if (error != 0) { 6946 device_printf(sc->sc_dev, 6947 "%s: could not add BSS node, error %d\n", __func__, error); 6948 return error; 6949 } 6950 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n", 6951 __func__, node.id); 6952 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 6953 device_printf(sc->sc_dev, 6954 "%s: could not setup link quality for node %d, error %d\n", 6955 __func__, node.id, error); 6956 return error; 6957 } 6958 6959 if ((error = iwn_init_sensitivity(sc)) != 0) { 6960 device_printf(sc->sc_dev, 6961 "%s: could not set sensitivity, error %d\n", __func__, 6962 error); 6963 return error; 6964 } 6965 /* Start periodic calibration timer. */ 6966 sc->calib.state = IWN_CALIB_STATE_ASSOC; 6967 sc->calib_cnt = 0; 6968 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 6969 sc); 6970 6971 /* Link LED always on while associated. */ 6972 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 6973 6974 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6975 6976 return 0; 6977 } 6978 6979 /* 6980 * This function is called by upper layer when an ADDBA request is received 6981 * from another STA and before the ADDBA response is sent. 
6982 */ 6983 static int 6984 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 6985 int baparamset, int batimeout, int baseqctl) 6986 { 6987 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 6988 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6989 struct iwn_ops *ops = &sc->ops; 6990 struct iwn_node *wn = (void *)ni; 6991 struct iwn_node_info node; 6992 uint16_t ssn; 6993 uint8_t tid; 6994 int error; 6995 6996 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6997 6998 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 6999 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 7000 7001 memset(&node, 0, sizeof node); 7002 node.id = wn->id; 7003 node.control = IWN_NODE_UPDATE; 7004 node.flags = IWN_FLAG_SET_ADDBA; 7005 node.addba_tid = tid; 7006 node.addba_ssn = htole16(ssn); 7007 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 7008 wn->id, tid, ssn); 7009 error = ops->add_node(sc, &node, 1); 7010 if (error != 0) 7011 return error; 7012 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 7013 #undef MS 7014 } 7015 7016 /* 7017 * This function is called by upper layer on teardown of an HT-immediate 7018 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
7019 */ 7020 static void 7021 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 7022 { 7023 struct ieee80211com *ic = ni->ni_ic; 7024 struct iwn_softc *sc = ic->ic_ifp->if_softc; 7025 struct iwn_ops *ops = &sc->ops; 7026 struct iwn_node *wn = (void *)ni; 7027 struct iwn_node_info node; 7028 uint8_t tid; 7029 7030 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7031 7032 /* XXX: tid as an argument */ 7033 for (tid = 0; tid < WME_NUM_TID; tid++) { 7034 if (&ni->ni_rx_ampdu[tid] == rap) 7035 break; 7036 } 7037 7038 memset(&node, 0, sizeof node); 7039 node.id = wn->id; 7040 node.control = IWN_NODE_UPDATE; 7041 node.flags = IWN_FLAG_SET_DELBA; 7042 node.delba_tid = tid; 7043 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 7044 (void)ops->add_node(sc, &node, 1); 7045 sc->sc_ampdu_rx_stop(ni, rap); 7046 } 7047 7048 static int 7049 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 7050 int dialogtoken, int baparamset, int batimeout) 7051 { 7052 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 7053 int qid; 7054 7055 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7056 7057 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 7058 if (sc->qid2tap[qid] == NULL) 7059 break; 7060 } 7061 if (qid == sc->ntxqs) { 7062 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 7063 __func__); 7064 return 0; 7065 } 7066 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 7067 if (tap->txa_private == NULL) { 7068 device_printf(sc->sc_dev, 7069 "%s: failed to alloc TX aggregation structure\n", __func__); 7070 return 0; 7071 } 7072 sc->qid2tap[qid] = tap; 7073 *(int *)tap->txa_private = qid; 7074 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 7075 batimeout); 7076 } 7077 7078 static int 7079 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 7080 int code, int baparamset, int batimeout) 7081 { 7082 struct iwn_softc *sc = 
ni->ni_ic->ic_ifp->if_softc; 7083 int qid = *(int *)tap->txa_private; 7084 uint8_t tid = tap->txa_tid; 7085 int ret; 7086 7087 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7088 7089 if (code == IEEE80211_STATUS_SUCCESS) { 7090 ni->ni_txseqs[tid] = tap->txa_start & 0xfff; 7091 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); 7092 if (ret != 1) 7093 return ret; 7094 } else { 7095 sc->qid2tap[qid] = NULL; 7096 free(tap->txa_private, M_DEVBUF); 7097 tap->txa_private = NULL; 7098 } 7099 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); 7100 } 7101 7102 /* 7103 * This function is called by upper layer when an ADDBA response is received 7104 * from another STA. 7105 */ 7106 static int 7107 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 7108 uint8_t tid) 7109 { 7110 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; 7111 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 7112 struct iwn_ops *ops = &sc->ops; 7113 struct iwn_node *wn = (void *)ni; 7114 struct iwn_node_info node; 7115 int error, qid; 7116 7117 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7118 7119 /* Enable TX for the specified RA/TID. 
*/ 7120 wn->disable_tid &= ~(1 << tid); 7121 memset(&node, 0, sizeof node); 7122 node.id = wn->id; 7123 node.control = IWN_NODE_UPDATE; 7124 node.flags = IWN_FLAG_SET_DISABLE_TID; 7125 node.disable_tid = htole16(wn->disable_tid); 7126 error = ops->add_node(sc, &node, 1); 7127 if (error != 0) 7128 return 0; 7129 7130 if ((error = iwn_nic_lock(sc)) != 0) 7131 return 0; 7132 qid = *(int *)tap->txa_private; 7133 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n", 7134 __func__, wn->id, tid, tap->txa_start, qid); 7135 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff); 7136 iwn_nic_unlock(sc); 7137 7138 iwn_set_link_quality(sc, ni); 7139 return 1; 7140 } 7141 7142 static void 7143 iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 7144 { 7145 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 7146 struct iwn_ops *ops = &sc->ops; 7147 uint8_t tid = tap->txa_tid; 7148 int qid; 7149 7150 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7151 7152 sc->sc_addba_stop(ni, tap); 7153 7154 if (tap->txa_private == NULL) 7155 return; 7156 7157 qid = *(int *)tap->txa_private; 7158 if (sc->txq[qid].queued != 0) 7159 return; 7160 if (iwn_nic_lock(sc) != 0) 7161 return; 7162 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff); 7163 iwn_nic_unlock(sc); 7164 sc->qid2tap[qid] = NULL; 7165 free(tap->txa_private, M_DEVBUF); 7166 tap->txa_private = NULL; 7167 } 7168 7169 static void 7170 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 7171 int qid, uint8_t tid, uint16_t ssn) 7172 { 7173 struct iwn_node *wn = (void *)ni; 7174 7175 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7176 7177 /* Stop TX scheduler while we're changing its configuration. */ 7178 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7179 IWN4965_TXQ_STATUS_CHGACT); 7180 7181 /* Assign RA/TID translation to the queue. 
*/ 7182 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 7183 wn->id << 4 | tid); 7184 7185 /* Enable chain-building mode for the queue. */ 7186 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 7187 7188 /* Set starting sequence number from the ADDBA request. */ 7189 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 7190 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7191 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 7192 7193 /* Set scheduler window size. */ 7194 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 7195 IWN_SCHED_WINSZ); 7196 /* Set scheduler frame limit. */ 7197 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 7198 IWN_SCHED_LIMIT << 16); 7199 7200 /* Enable interrupts for the queue. */ 7201 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 7202 7203 /* Mark the queue as active. */ 7204 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7205 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 7206 iwn_tid2fifo[tid] << 1); 7207 } 7208 7209 static void 7210 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 7211 { 7212 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7213 7214 /* Stop TX scheduler while we're changing its configuration. */ 7215 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7216 IWN4965_TXQ_STATUS_CHGACT); 7217 7218 /* Set starting sequence number from the ADDBA request. */ 7219 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7220 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 7221 7222 /* Disable interrupts for the queue. */ 7223 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 7224 7225 /* Mark the queue as inactive. 
*/ 7226 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7227 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 7228 } 7229 7230 static void 7231 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 7232 int qid, uint8_t tid, uint16_t ssn) 7233 { 7234 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7235 7236 struct iwn_node *wn = (void *)ni; 7237 7238 /* Stop TX scheduler while we're changing its configuration. */ 7239 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7240 IWN5000_TXQ_STATUS_CHGACT); 7241 7242 /* Assign RA/TID translation to the queue. */ 7243 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 7244 wn->id << 4 | tid); 7245 7246 /* Enable chain-building mode for the queue. */ 7247 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 7248 7249 /* Enable aggregation for the queue. */ 7250 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 7251 7252 /* Set starting sequence number from the ADDBA request. */ 7253 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 7254 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7255 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 7256 7257 /* Set scheduler window size and frame limit. */ 7258 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7259 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7260 7261 /* Enable interrupts for the queue. */ 7262 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 7263 7264 /* Mark the queue as active. */ 7265 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7266 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 7267 } 7268 7269 static void 7270 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 7271 { 7272 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7273 7274 /* Stop TX scheduler while we're changing its configuration. 
*/ 7275 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7276 IWN5000_TXQ_STATUS_CHGACT); 7277 7278 /* Disable aggregation for the queue. */ 7279 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 7280 7281 /* Set starting sequence number from the ADDBA request. */ 7282 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7283 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 7284 7285 /* Disable interrupts for the queue. */ 7286 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 7287 7288 /* Mark the queue as inactive. */ 7289 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7290 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 7291 } 7292 7293 /* 7294 * Query calibration tables from the initialization firmware. We do this 7295 * only once at first boot. Called from a process context. 7296 */ 7297 static int 7298 iwn5000_query_calibration(struct iwn_softc *sc) 7299 { 7300 struct iwn5000_calib_config cmd; 7301 int error; 7302 7303 memset(&cmd, 0, sizeof cmd); 7304 cmd.ucode.once.enable = htole32(0xffffffff); 7305 cmd.ucode.once.start = htole32(0xffffffff); 7306 cmd.ucode.once.send = htole32(0xffffffff); 7307 cmd.ucode.flags = htole32(0xffffffff); 7308 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", 7309 __func__); 7310 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 7311 if (error != 0) 7312 return error; 7313 7314 /* Wait at most two seconds for calibration to complete. */ 7315 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 7316 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz); 7317 return error; 7318 } 7319 7320 /* 7321 * Send calibration results to the runtime firmware. These results were 7322 * obtained on first boot from the initialization firmware. 
7323 */ 7324 static int 7325 iwn5000_send_calibration(struct iwn_softc *sc) 7326 { 7327 int idx, error; 7328 7329 for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) { 7330 if (!(sc->base_params->calib_need & (1<<idx))) { 7331 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7332 "No need of calib %d\n", 7333 idx); 7334 continue; /* no need for this calib */ 7335 } 7336 if (sc->calibcmd[idx].buf == NULL) { 7337 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7338 "Need calib idx : %d but no available data\n", 7339 idx); 7340 continue; 7341 } 7342 7343 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7344 "send calibration result idx=%d len=%d\n", idx, 7345 sc->calibcmd[idx].len); 7346 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 7347 sc->calibcmd[idx].len, 0); 7348 if (error != 0) { 7349 device_printf(sc->sc_dev, 7350 "%s: could not send calibration result, error %d\n", 7351 __func__, error); 7352 return error; 7353 } 7354 } 7355 return 0; 7356 } 7357 7358 static int 7359 iwn5000_send_wimax_coex(struct iwn_softc *sc) 7360 { 7361 struct iwn5000_wimax_coex wimax; 7362 7363 #if 0 7364 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 7365 /* Enable WiMAX coexistence for combo adapters. */ 7366 wimax.flags = 7367 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 7368 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 7369 IWN_WIMAX_COEX_STA_TABLE_VALID | 7370 IWN_WIMAX_COEX_ENABLE; 7371 memcpy(wimax.events, iwn6050_wimax_events, 7372 sizeof iwn6050_wimax_events); 7373 } else 7374 #endif 7375 { 7376 /* Disable WiMAX coexistence. 
*/ 7377 wimax.flags = 0; 7378 memset(wimax.events, 0, sizeof wimax.events); 7379 } 7380 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 7381 __func__); 7382 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 7383 } 7384 7385 static int 7386 iwn5000_crystal_calib(struct iwn_softc *sc) 7387 { 7388 struct iwn5000_phy_calib_crystal cmd; 7389 7390 memset(&cmd, 0, sizeof cmd); 7391 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 7392 cmd.ngroups = 1; 7393 cmd.isvalid = 1; 7394 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 7395 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 7396 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", 7397 cmd.cap_pin[0], cmd.cap_pin[1]); 7398 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7399 } 7400 7401 static int 7402 iwn5000_temp_offset_calib(struct iwn_softc *sc) 7403 { 7404 struct iwn5000_phy_calib_temp_offset cmd; 7405 7406 memset(&cmd, 0, sizeof cmd); 7407 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7408 cmd.ngroups = 1; 7409 cmd.isvalid = 1; 7410 if (sc->eeprom_temp != 0) 7411 cmd.offset = htole16(sc->eeprom_temp); 7412 else 7413 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 7414 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n", 7415 le16toh(cmd.offset)); 7416 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7417 } 7418 7419 static int 7420 iwn5000_temp_offset_calibv2(struct iwn_softc *sc) 7421 { 7422 struct iwn5000_phy_calib_temp_offsetv2 cmd; 7423 7424 memset(&cmd, 0, sizeof cmd); 7425 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7426 cmd.ngroups = 1; 7427 cmd.isvalid = 1; 7428 if (sc->eeprom_temp != 0) { 7429 cmd.offset_low = htole16(sc->eeprom_temp); 7430 cmd.offset_high = htole16(sc->eeprom_temp_high); 7431 } else { 7432 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 7433 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 7434 } 7435 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 7436 7437 DPRINTF(sc, 
	    IWN_DEBUG_CALIBRATE,
	    "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
	    le16toh(cmd.offset_low),
	    le16toh(cmd.offset_high),
	    le16toh(cmd.burnt_voltage_ref));

	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).  It programs the 4965 TX
 * scheduler: ring addresses, per-queue windows and FIFO assignments.
 */
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Disable chain mode for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);

	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
		/* Reset read and write pointers of each queue. */
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		/* Set scheduler window size. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
		/* Set scheduler frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16);
	}

	/* Enable interrupts for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	for (qid = 0; qid < 7; qid++) {
		/* Queue-to-FIFO mapping for the 4965. */
		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
	}
	iwn_nic_unlock(sc);
	return 0;
}

/*
 * This function is called after the initialization or runtime firmware
 * notifies us of its readiness (called in a process context).  It
 * programs the 5000-series TX scheduler and then runs the calibration
 * sequence (see continuation below).
 */
static int
iwn5000_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Switch to using ICT interrupt mode. */
	iwn5000_ict_reset(sc);

	if ((error = iwn_nic_lock(sc)) != 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Enable chain mode for all queues, except command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
	else
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);

	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
		/* Reset read and write pointers of each queue. */
		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit.
 */
		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
	}

	/* Enable interrupts for all our 20 queues. */
	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
		/* Mark TX rings as active. */
		for (qid = 0; qid < 11; qid++) {
			/* Queue-to-FIFO mapping with PAN support. */
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	} else {
		/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
		for (qid = 0; qid < 7; qid++) {
			/* Queue-to-FIFO mapping without PAN support. */
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	}
	iwn_nic_unlock(sc);

	/* Configure WiMAX coexistence for combo adapters. */
	error = iwn5000_send_wimax_coex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure WiMAX coexistence, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
		/* Perform crystal calibration. */
		error = iwn5000_crystal_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: crystal calibration failed, error %d\n",
			    __func__, error);
			return error;
		}
	}
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
		/* Query calibration from the initialization firmware. */
		if ((error = iwn5000_query_calibration(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not query calibration, error %d\n",
			    __func__, error);
			return error;
		}
		/*
		 * We have the calibration results now, reboot with the
		 * runtime firmware (call ourselves recursively!)
		 */
		iwn_hw_stop(sc);
		error = iwn_hw_init(sc);
	} else {
		/* Send calibration results to runtime firmware. */
		error = iwn5000_send_calibration(sc);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * The firmware boot code is small and is intended to be copied directly into
 * the NIC internal memory (no DMA transfer).  "size" is in bytes and is
 * converted to 32-bit words below.
 */
static int
iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
{
	int error, ntries;

	/* The BSM works on 32-bit words. */
	size /= sizeof (uint32_t);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Copy microcode image into NIC memory. */
	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
	    (const uint32_t *)ucode, size);

	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);

	/* Start boot load now. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);

	/* Wait for transfer to complete. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
		    IWN_BSM_WR_CTRL_START))
			break;
		DELAY(10);
	}
	if (ntries == 1000) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		iwn_nic_unlock(sc);
		return ETIMEDOUT;
	}

	/* Enable boot after power up.
 */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);

	iwn_nic_unlock(sc);
	return 0;
}

/*
 * Load the 4965 firmware through the Bootstrap State Machine: first the
 * initialization sections, wait for the "alive" notification, then the
 * runtime sections.
 */
static int
iwn4965_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy initialization sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->init.text, fw->init.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find initialization sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
	iwn_nic_unlock(sc);

	/* Load firmware boot code. */
	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		return error;
	}
	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);

	/* Wait at most one second for first alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	/* Retrieve current temperature for initial TX power calibration. */
	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
	sc->temp = iwn4965_get_temperature(sc);

	/* Copy runtime sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->main.text, fw->main.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find runtime sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	/* IWN_FW_UPDATED tells the BSM this is the runtime image. */
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
	    IWN_FW_UPDATED | fw->main.textsz);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * DMA one firmware section of "size" bytes to SRAM address "dst" using
 * the service DMA channel, then wait for the transfer to complete.
 */
static int
iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
    const uint8_t *section, int size)
{
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, size);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_PAUSE);

	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
	    IWN_LOADDR(dma->paddr));
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
	    IWN_HIADDR(dma->paddr) << 28 | size);
	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
	    IWN_FH_TXBUF_STATUS_TFBD_VALID);

	/* Kick Flow Handler to start DMA transfer.
 */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);

	iwn_nic_unlock(sc);

	/* Wait at most five seconds for FH DMA transfer to complete. */
	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
}

/*
 * Load the .text and .data sections of the appropriate firmware image
 * (initialization firmware on first boot, runtime firmware afterwards)
 * and start it.
 */
static int
iwn5000_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_part *fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Load the initialization firmware on first boot only. */
	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
	    &sc->fw.main : &sc->fw.init;

	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
	    fw->text, fw->textsz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".text", error);
		return error;
	}
	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
	    fw->data, fw->datasz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".data", error);
		return error;
	}

	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);
	return 0;
}

/*
 * Extract text and data sections from a legacy firmware image.  The
 * header is the API revision word, an optional build number (API >= 3),
 * then five 32-bit LE section sizes; the sections follow in order
 * main.text, main.data, init.text, init.data, boot.text.
 */
static int
iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
{
	const uint32_t *ptr;
	size_t hdrlen = 24;
	uint32_t rev;

	/* Caller (iwn_read_firmware) guarantees at least 4 bytes here. */
	ptr = (const uint32_t *)fw->data;
	rev = le32toh(*ptr++);

	/* Check firmware API version. */
	if (IWN_FW_API(rev) <= 1) {
		device_printf(sc->sc_dev,
		    "%s: bad firmware, need API version >=2\n", __func__);
		return EINVAL;
	}
	if (IWN_FW_API(rev) >= 3) {
		/* Skip build number (version 2 header). */
		hdrlen += 4;
		ptr++;
	}
	if (fw->size < hdrlen) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	fw->main.textsz = le32toh(*ptr++);
	fw->main.datasz = le32toh(*ptr++);
	fw->init.textsz = le32toh(*ptr++);
	fw->init.datasz = le32toh(*ptr++);
	fw->boot.textsz = le32toh(*ptr++);

	/*
	 * Check that all firmware sections fit.
	 * NOTE(review): on a 32-bit size_t the sum of five uint32_t
	 * sizes could wrap for a corrupt image — worth confirming.
	 */
	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}

	/* Get pointers to firmware sections. */
	fw->main.text = (const uint8_t *)ptr;
	fw->main.data = fw->main.text + fw->main.textsz;
	fw->init.text = fw->main.data + fw->main.datasz;
	fw->init.data = fw->init.text + fw->init.textsz;
	fw->boot.text = fw->init.data + fw->init.datasz;
	return 0;
}

/*
 * Extract text and data sections from a TLV firmware image.
7854 */ 7855 static int 7856 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 7857 uint16_t alt) 7858 { 7859 const struct iwn_fw_tlv_hdr *hdr; 7860 const struct iwn_fw_tlv *tlv; 7861 const uint8_t *ptr, *end; 7862 uint64_t altmask; 7863 uint32_t len, tmp; 7864 7865 if (fw->size < sizeof (*hdr)) { 7866 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7867 __func__, fw->size); 7868 return EINVAL; 7869 } 7870 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 7871 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 7872 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 7873 __func__, le32toh(hdr->signature)); 7874 return EINVAL; 7875 } 7876 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 7877 le32toh(hdr->build)); 7878 7879 /* 7880 * Select the closest supported alternative that is less than 7881 * or equal to the specified one. 7882 */ 7883 altmask = le64toh(hdr->altmask); 7884 while (alt > 0 && !(altmask & (1ULL << alt))) 7885 alt--; /* Downgrade. */ 7886 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 7887 7888 ptr = (const uint8_t *)(hdr + 1); 7889 end = (const uint8_t *)(fw->data + fw->size); 7890 7891 /* Parse type-length-value fields. */ 7892 while (ptr + sizeof (*tlv) <= end) { 7893 tlv = (const struct iwn_fw_tlv *)ptr; 7894 len = le32toh(tlv->len); 7895 7896 ptr += sizeof (*tlv); 7897 if (ptr + len > end) { 7898 device_printf(sc->sc_dev, 7899 "%s: firmware too short: %zu bytes\n", __func__, 7900 fw->size); 7901 return EINVAL; 7902 } 7903 /* Skip other alternatives. 
*/ 7904 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 7905 goto next; 7906 7907 switch (le16toh(tlv->type)) { 7908 case IWN_FW_TLV_MAIN_TEXT: 7909 fw->main.text = ptr; 7910 fw->main.textsz = len; 7911 break; 7912 case IWN_FW_TLV_MAIN_DATA: 7913 fw->main.data = ptr; 7914 fw->main.datasz = len; 7915 break; 7916 case IWN_FW_TLV_INIT_TEXT: 7917 fw->init.text = ptr; 7918 fw->init.textsz = len; 7919 break; 7920 case IWN_FW_TLV_INIT_DATA: 7921 fw->init.data = ptr; 7922 fw->init.datasz = len; 7923 break; 7924 case IWN_FW_TLV_BOOT_TEXT: 7925 fw->boot.text = ptr; 7926 fw->boot.textsz = len; 7927 break; 7928 case IWN_FW_TLV_ENH_SENS: 7929 if (!len) 7930 sc->sc_flags |= IWN_FLAG_ENH_SENS; 7931 break; 7932 case IWN_FW_TLV_PHY_CALIB: 7933 tmp = le32toh(*ptr); 7934 if (tmp < 253) { 7935 sc->reset_noise_gain = tmp; 7936 sc->noise_gain = tmp + 1; 7937 } 7938 break; 7939 case IWN_FW_TLV_PAN: 7940 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; 7941 DPRINTF(sc, IWN_DEBUG_RESET, 7942 "PAN Support found: %d\n", 1); 7943 break; 7944 case IWN_FW_TLV_FLAGS: 7945 if (len < sizeof(uint32_t)) 7946 break; 7947 if (len % sizeof(uint32_t)) 7948 break; 7949 sc->tlv_feature_flags = le32toh(*ptr); 7950 DPRINTF(sc, IWN_DEBUG_RESET, 7951 "%s: feature: 0x%08x\n", 7952 __func__, 7953 sc->tlv_feature_flags); 7954 break; 7955 case IWN_FW_TLV_PBREQ_MAXLEN: 7956 case IWN_FW_TLV_RUNT_EVTLOG_PTR: 7957 case IWN_FW_TLV_RUNT_EVTLOG_SIZE: 7958 case IWN_FW_TLV_RUNT_ERRLOG_PTR: 7959 case IWN_FW_TLV_INIT_EVTLOG_PTR: 7960 case IWN_FW_TLV_INIT_EVTLOG_SIZE: 7961 case IWN_FW_TLV_INIT_ERRLOG_PTR: 7962 case IWN_FW_TLV_WOWLAN_INST: 7963 case IWN_FW_TLV_WOWLAN_DATA: 7964 DPRINTF(sc, IWN_DEBUG_RESET, 7965 "TLV type %d reconized but not handled\n", 7966 le16toh(tlv->type)); 7967 break; 7968 default: 7969 DPRINTF(sc, IWN_DEBUG_RESET, 7970 "TLV type %d not handled\n", le16toh(tlv->type)); 7971 break; 7972 } 7973 next: /* TLV fields are 32-bit aligned. 
*/ 7974 ptr += (len + 3) & ~3; 7975 } 7976 return 0; 7977 } 7978 7979 static int 7980 iwn_read_firmware(struct iwn_softc *sc) 7981 { 7982 struct iwn_fw_info *fw = &sc->fw; 7983 int error; 7984 7985 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7986 7987 IWN_UNLOCK(sc); 7988 7989 memset(fw, 0, sizeof (*fw)); 7990 7991 /* Read firmware image from filesystem. */ 7992 sc->fw_fp = firmware_get(sc->fwname); 7993 if (sc->fw_fp == NULL) { 7994 device_printf(sc->sc_dev, "%s: could not read firmware %s\n", 7995 __func__, sc->fwname); 7996 IWN_LOCK(sc); 7997 return EINVAL; 7998 } 7999 IWN_LOCK(sc); 8000 8001 fw->size = sc->fw_fp->datasize; 8002 fw->data = (const uint8_t *)sc->fw_fp->data; 8003 if (fw->size < sizeof (uint32_t)) { 8004 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 8005 __func__, fw->size); 8006 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8007 sc->fw_fp = NULL; 8008 return EINVAL; 8009 } 8010 8011 /* Retrieve text and data sections. */ 8012 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 8013 error = iwn_read_firmware_leg(sc, fw); 8014 else 8015 error = iwn_read_firmware_tlv(sc, fw, 1); 8016 if (error != 0) { 8017 device_printf(sc->sc_dev, 8018 "%s: could not read firmware sections, error %d\n", 8019 __func__, error); 8020 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8021 sc->fw_fp = NULL; 8022 return error; 8023 } 8024 8025 /* Make sure text and data sections fit in hardware memory. */ 8026 if (fw->main.textsz > sc->fw_text_maxsz || 8027 fw->main.datasz > sc->fw_data_maxsz || 8028 fw->init.textsz > sc->fw_text_maxsz || 8029 fw->init.datasz > sc->fw_data_maxsz || 8030 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 8031 (fw->boot.textsz & 3) != 0) { 8032 device_printf(sc->sc_dev, "%s: firmware sections too large\n", 8033 __func__); 8034 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8035 sc->fw_fp = NULL; 8036 return EINVAL; 8037 } 8038 8039 /* We can proceed with loading the firmware. 
 */
	return 0;
}

/*
 * Set the "initialization complete" bit and wait for the adapter's
 * main clock to stabilize.
 */
static int
iwn_clock_wait(struct iwn_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization. */
	for (ntries = 0; ntries < 2500; ntries++) {
		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
			return 0;
		DELAY(10);
	}
	device_printf(sc->sc_dev,
	    "%s: timeout waiting for clock stabilization\n", __func__);
	return ETIMEDOUT;
}

/*
 * Power on the adapter and apply the PCIe/clock workarounds required
 * before the NIC registers can be accessed reliably.
 */
static int
iwn_apm_init(struct iwn_softc *sc)
{
	uint32_t reg;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Disable L0s exit timer (NMI bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
	/* Don't wait for ICH L0s (ICH bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);

	/* Set FH wait threshold to max (HW bug under stress workaround). */
	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);

	/* Enable HAP INTA to move adapter from L1a to L0s. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);

	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & 0x02)	/* L1 Entry enabled. */
		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
	else
		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);

	/* Chip-specific PLL configuration, when required. */
	if (sc->base_params->pll_cfg_val)
		IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Enable DMA and BSM (Bootstrap State Machine). */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
	} else {
		/* Enable DMA. */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
	}
	DELAY(20);
	/* Disable L1-Active. */
	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Stop busmaster DMA and wait for the master-disabled acknowledgement
 * from the hardware.
 */
static void
iwn_apm_stop_master(struct iwn_softc *sc)
{
	int ntries;

	/* Stop busmaster DMA activity. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
	for (ntries = 0; ntries < 100; ntries++) {
		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
			return;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}

/*
 * Power the adapter off: stop DMA, software-reset the device and clear
 * the "initialization complete" bit set by iwn_clock_wait().
 */
static void
iwn_apm_stop(struct iwn_softc *sc)
{
	iwn_apm_stop_master(sc);

	/* Reset the entire device. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
	DELAY(10);
	/* Clear "initialization complete" bit. */
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
}

/*
 * 4965-specific hardware configuration: latch radio config and SI bits
 * into the HW interface config register.
 */
static int
iwn4965_nic_config(struct iwn_softc *sc)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
		/*
		 * I don't believe this to be correct but this is what the
		 * vendor driver is doing. Probably the bits should not be
		 * shifted in IWN_RFCFG_*.
		 */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
	return 0;
}

/*
 * 5000-and-later hardware configuration: radio config bits plus
 * chip-specific quirks (1000-series voltage regulator, internal PA,
 * calibration-version and extra GP driver bits).
 */
static int
iwn5000_nic_config(struct iwn_softc *sc)
{
	uint32_t tmp;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);

	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/*
		 * Select first Switching Voltage Regulator (1.32V) to
		 * solve a stability issue related to noisy DC2DC line
		 * in the silicon of 1000 Series.
		 */
		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
	}
	iwn_nic_unlock(sc);

	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
		/* Use internal power amplifier only. */
		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
	}
	if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
		/* Indicate that ROM calibration version is >=6. */
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
	}
	if (sc->base_params->additional_gp_drv_bit)
		IWN_SETBITS(sc, IWN_GP_DRIVER,
		    sc->base_params->additional_gp_drv_bit);
	return 0;
}

/*
 * Take NIC ownership over Intel Active Management Technology (AMT).
 */
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/*
 * Bring the hardware all the way up: power it on, run the
 * adapter-specific configuration, set up the RX/TX rings and DMA
 * channels, load the firmware and finish with the adapter-specific
 * post-"alive" initialization.
 */
static int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits. */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing. */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->base_params->shadow_reg_enable)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}
	/* Do post-firmware initialization. */

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ops->post_alive(sc);
}

/*
 * Shut the hardware down: disable interrupts, stop the TX scheduler and
 * DMA engines, reset the RX/TX rings and power the adapter off.
 */
static void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels. */
	if (iwn_nic_lock(sc) == 0) {
		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
			/* Wait for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
				    IWN_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		iwn_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwn_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwn_nic_lock(sc) == 0) {
		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
		iwn_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter. */
	iwn_apm_stop(sc);
}

/*
 * Taskqueue callback: the RF kill switch was released; bring the
 * interface back up.
 */
static void
iwn_radio_on(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (vap != NULL) {
		iwn_init(sc);
		ieee80211_init(vap);
	}
}

/*
 * Taskqueue callback: the RF kill switch was engaged; stop the
 * interface.
 */
static void
iwn_radio_off(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	iwn_stop(sc);
	if (vap != NULL)
		ieee80211_stop(vap);

	/* Enable interrupts to get RF toggle notification.
*/ 8449 IWN_LOCK(sc); 8450 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8451 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8452 IWN_UNLOCK(sc); 8453 } 8454 8455 static void 8456 iwn_panicked(void *arg0, int pending) 8457 { 8458 struct iwn_softc *sc = arg0; 8459 struct ifnet *ifp = sc->sc_ifp; 8460 struct ieee80211com *ic = ifp->if_l2com; 8461 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8462 int error; 8463 8464 if (vap == NULL) { 8465 printf("%s: null vap\n", __func__); 8466 return; 8467 } 8468 8469 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; " 8470 "resetting...\n", __func__, vap->iv_state); 8471 8472 iwn_stop(sc); 8473 iwn_init(sc); 8474 iwn_start(sc->sc_ifp); 8475 if (vap->iv_state >= IEEE80211_S_AUTH && 8476 (error = iwn_auth(sc, vap)) != 0) { 8477 device_printf(sc->sc_dev, 8478 "%s: could not move to auth state\n", __func__); 8479 } 8480 if (vap->iv_state >= IEEE80211_S_RUN && 8481 (error = iwn_run(sc, vap)) != 0) { 8482 device_printf(sc->sc_dev, 8483 "%s: could not move to run state\n", __func__); 8484 } 8485 } 8486 8487 static void 8488 iwn_init_locked(struct iwn_softc *sc) 8489 { 8490 struct ifnet *ifp = sc->sc_ifp; 8491 int error; 8492 8493 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8494 8495 IWN_LOCK_ASSERT(sc); 8496 8497 if ((error = iwn_hw_prepare(sc)) != 0) { 8498 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 8499 __func__, error); 8500 goto fail; 8501 } 8502 8503 /* Initialize interrupt mask to default value. */ 8504 sc->int_mask = IWN_INT_MASK_DEF; 8505 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8506 8507 /* Check that the radio is not disabled by hardware switch. */ 8508 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 8509 device_printf(sc->sc_dev, 8510 "radio is disabled by hardware switch\n"); 8511 /* Enable interrupts to get RF toggle notifications. 
*/ 8512 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8513 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8514 return; 8515 } 8516 8517 /* Read firmware images from the filesystem. */ 8518 if ((error = iwn_read_firmware(sc)) != 0) { 8519 device_printf(sc->sc_dev, 8520 "%s: could not read firmware, error %d\n", __func__, 8521 error); 8522 goto fail; 8523 } 8524 8525 /* Initialize hardware and upload firmware. */ 8526 error = iwn_hw_init(sc); 8527 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8528 sc->fw_fp = NULL; 8529 if (error != 0) { 8530 device_printf(sc->sc_dev, 8531 "%s: could not initialize hardware, error %d\n", __func__, 8532 error); 8533 goto fail; 8534 } 8535 8536 /* Configure adapter now that it is ready. */ 8537 if ((error = iwn_config(sc)) != 0) { 8538 device_printf(sc->sc_dev, 8539 "%s: could not configure device, error %d\n", __func__, 8540 error); 8541 goto fail; 8542 } 8543 8544 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 8545 ifp->if_drv_flags |= IFF_DRV_RUNNING; 8546 8547 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 8548 8549 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8550 8551 return; 8552 8553 fail: iwn_stop_locked(sc); 8554 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 8555 } 8556 8557 static void 8558 iwn_init(void *arg) 8559 { 8560 struct iwn_softc *sc = arg; 8561 struct ifnet *ifp = sc->sc_ifp; 8562 struct ieee80211com *ic = ifp->if_l2com; 8563 8564 IWN_LOCK(sc); 8565 iwn_init_locked(sc); 8566 IWN_UNLOCK(sc); 8567 8568 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 8569 ieee80211_start_all(ic); 8570 } 8571 8572 static void 8573 iwn_stop_locked(struct iwn_softc *sc) 8574 { 8575 struct ifnet *ifp = sc->sc_ifp; 8576 8577 IWN_LOCK_ASSERT(sc); 8578 8579 sc->sc_is_scanning = 0; 8580 sc->sc_tx_timer = 0; 8581 callout_stop(&sc->watchdog_to); 8582 callout_stop(&sc->calib_to); 8583 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 8584 8585 /* Power OFF hardware. 
*/ 8586 iwn_hw_stop(sc); 8587 } 8588 8589 static void 8590 iwn_stop(struct iwn_softc *sc) 8591 { 8592 IWN_LOCK(sc); 8593 iwn_stop_locked(sc); 8594 IWN_UNLOCK(sc); 8595 } 8596 8597 /* 8598 * Callback from net80211 to start a scan. 8599 */ 8600 static void 8601 iwn_scan_start(struct ieee80211com *ic) 8602 { 8603 struct ifnet *ifp = ic->ic_ifp; 8604 struct iwn_softc *sc = ifp->if_softc; 8605 8606 IWN_LOCK(sc); 8607 /* make the link LED blink while we're scanning */ 8608 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 8609 IWN_UNLOCK(sc); 8610 } 8611 8612 /* 8613 * Callback from net80211 to terminate a scan. 8614 */ 8615 static void 8616 iwn_scan_end(struct ieee80211com *ic) 8617 { 8618 struct ifnet *ifp = ic->ic_ifp; 8619 struct iwn_softc *sc = ifp->if_softc; 8620 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8621 8622 IWN_LOCK(sc); 8623 if (vap->iv_state == IEEE80211_S_RUN) { 8624 /* Set link LED to ON status if we are associated */ 8625 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 8626 } 8627 IWN_UNLOCK(sc); 8628 } 8629 8630 /* 8631 * Callback from net80211 to force a channel change. 8632 */ 8633 static void 8634 iwn_set_channel(struct ieee80211com *ic) 8635 { 8636 const struct ieee80211_channel *c = ic->ic_curchan; 8637 struct ifnet *ifp = ic->ic_ifp; 8638 struct iwn_softc *sc = ifp->if_softc; 8639 int error; 8640 8641 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8642 8643 IWN_LOCK(sc); 8644 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 8645 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 8646 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 8647 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 8648 8649 /* 8650 * Only need to set the channel in Monitor mode. AP scanning and auth 8651 * are already taken care of by their respective firmware commands. 
8652 */ 8653 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 8654 error = iwn_config(sc); 8655 if (error != 0) 8656 device_printf(sc->sc_dev, 8657 "%s: error %d settting channel\n", __func__, error); 8658 } 8659 IWN_UNLOCK(sc); 8660 } 8661 8662 /* 8663 * Callback from net80211 to start scanning of the current channel. 8664 */ 8665 static void 8666 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 8667 { 8668 struct ieee80211vap *vap = ss->ss_vap; 8669 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 8670 struct ieee80211com *ic = vap->iv_ic; 8671 int error; 8672 8673 IWN_LOCK(sc); 8674 error = iwn_scan(sc, vap, ss, ic->ic_curchan); 8675 IWN_UNLOCK(sc); 8676 if (error != 0) 8677 ieee80211_cancel_scan(vap); 8678 } 8679 8680 /* 8681 * Callback from net80211 to handle the minimum dwell time being met. 8682 * The intent is to terminate the scan but we just let the firmware 8683 * notify us when it's finished as we have no safe way to abort it. 8684 */ 8685 static void 8686 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 8687 { 8688 /* NB: don't try to abort scan; wait for firmware to finish */ 8689 } 8690 8691 static void 8692 iwn_hw_reset(void *arg0, int pending) 8693 { 8694 struct iwn_softc *sc = arg0; 8695 struct ifnet *ifp = sc->sc_ifp; 8696 struct ieee80211com *ic = ifp->if_l2com; 8697 8698 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8699 8700 iwn_stop(sc); 8701 iwn_init(sc); 8702 ieee80211_notify_radio(ic, 1); 8703 } 8704 #ifdef IWN_DEBUG 8705 #define IWN_DESC(x) case x: return #x 8706 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 8707 8708 /* 8709 * Translate CSR code to string 8710 */ 8711 static char *iwn_get_csr_string(int csr) 8712 { 8713 switch (csr) { 8714 IWN_DESC(IWN_HW_IF_CONFIG); 8715 IWN_DESC(IWN_INT_COALESCING); 8716 IWN_DESC(IWN_INT); 8717 IWN_DESC(IWN_INT_MASK); 8718 IWN_DESC(IWN_FH_INT); 8719 IWN_DESC(IWN_GPIO_IN); 8720 IWN_DESC(IWN_RESET); 8721 IWN_DESC(IWN_GP_CNTRL); 8722 IWN_DESC(IWN_HW_REV); 
8723 IWN_DESC(IWN_EEPROM); 8724 IWN_DESC(IWN_EEPROM_GP); 8725 IWN_DESC(IWN_OTP_GP); 8726 IWN_DESC(IWN_GIO); 8727 IWN_DESC(IWN_GP_UCODE); 8728 IWN_DESC(IWN_GP_DRIVER); 8729 IWN_DESC(IWN_UCODE_GP1); 8730 IWN_DESC(IWN_UCODE_GP2); 8731 IWN_DESC(IWN_LED); 8732 IWN_DESC(IWN_DRAM_INT_TBL); 8733 IWN_DESC(IWN_GIO_CHICKEN); 8734 IWN_DESC(IWN_ANA_PLL); 8735 IWN_DESC(IWN_HW_REV_WA); 8736 IWN_DESC(IWN_DBG_HPET_MEM); 8737 default: 8738 return "UNKNOWN CSR"; 8739 } 8740 } 8741 8742 /* 8743 * This function print firmware register 8744 */ 8745 static void 8746 iwn_debug_register(struct iwn_softc *sc) 8747 { 8748 int i; 8749 static const uint32_t csr_tbl[] = { 8750 IWN_HW_IF_CONFIG, 8751 IWN_INT_COALESCING, 8752 IWN_INT, 8753 IWN_INT_MASK, 8754 IWN_FH_INT, 8755 IWN_GPIO_IN, 8756 IWN_RESET, 8757 IWN_GP_CNTRL, 8758 IWN_HW_REV, 8759 IWN_EEPROM, 8760 IWN_EEPROM_GP, 8761 IWN_OTP_GP, 8762 IWN_GIO, 8763 IWN_GP_UCODE, 8764 IWN_GP_DRIVER, 8765 IWN_UCODE_GP1, 8766 IWN_UCODE_GP2, 8767 IWN_LED, 8768 IWN_DRAM_INT_TBL, 8769 IWN_GIO_CHICKEN, 8770 IWN_ANA_PLL, 8771 IWN_HW_REV_WA, 8772 IWN_DBG_HPET_MEM, 8773 }; 8774 DPRINTF(sc, IWN_DEBUG_REGISTER, 8775 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 8776 "\n"); 8777 for (i = 0; i < COUNTOF(csr_tbl); i++){ 8778 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 8779 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 8780 if ((i+1) % 3 == 0) 8781 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8782 } 8783 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8784 } 8785 #endif 8786