/*-
 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org>
 * Copyright (c) 2008 Sam Leffler, Errno Consulting
 * Copyright (c) 2011 Intel Corporation
 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwn.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>
#include <dev/iwn/if_iwn_devid.h>
#include <dev/iwn/if_iwn_chip_cfg.h>
#include <dev/iwn/if_iwn_debug.h>
#include <dev/iwn/if_iwn_ioctl.h>

struct iwn_ident {
	uint16_t	vendor;
	uint16_t	device;
	const char	*name;
};

static const struct iwn_ident iwn_ident_table[] = {
	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
	{ 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
	/* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" },
	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" },
	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" },
	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" },
	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" },
	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" },
	{ 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" },
	{ 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" },
	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" },
	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" },
	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" },
	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" },
	{ 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" },
	{ 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" },
	{ 0, 0, NULL }
};

static int	iwn_probe(device_t);
static int	iwn_attach(device_t);
static int	iwn4965_attach(struct iwn_softc *, uint16_t);
static int	iwn5000_attach(struct iwn_softc *, uint16_t);
static int	iwn_config_specific(struct iwn_softc *, uint16_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static void	iwn_sysctlattach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_detach(device_t);
static int	iwn_shutdown(device_t);
static int	iwn_suspend(device_t);
static int	iwn_resume(device_t);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
static int	iwn_read_eeprom(struct iwn_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static void	iwn4965_read_eeprom(struct iwn_softc *);
#ifdef IWN_DEBUG
static void	iwn4965_print_power_group(struct iwn_softc *, int);
#endif
static void	iwn5000_read_eeprom(struct iwn_softc *);
static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
static void	iwn_read_eeprom_band(struct iwn_softc *, int);
static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
		    struct ieee80211_channel *);
static int	iwn_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel[]);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwn_calib_timeout(void *);
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_rx_calib_results(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_rftoggle_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static void	iwn_intr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *);
static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *,
		    const struct ieee80211_bpf_params *params);
static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static void	iwn_start(struct ifnet *);
static void	iwn_start_locked(struct ifnet *);
static void	iwn_watchdog(void *);
static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *,
		    struct ieee80211_node *);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static int	iwn_updateedca(struct ieee80211com *);
static void	iwn_update_mcast(struct ifnet *);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn5000_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static void	iwn_save_stats_counters(struct iwn_softc *,
		    const struct iwn_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static void	iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn_send_btcoex(struct iwn_softc *);
static int	iwn_send_advanced_btcoex(struct iwn_softc *);
static int	iwn5000_runtime_calib(struct iwn_softc *);
static int	iwn_config(struct iwn_softc *);
static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
static int	iwn_scan(struct iwn_softc *, struct ieee80211vap *,
		    struct ieee80211_scan_state *, struct ieee80211_channel *);
static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_ampdu_rx_start(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *, int, int, int);
static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *);
static int	iwn_addba_request(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_addba_response(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
static int	iwn5000_query_calibration(struct iwn_softc *);
static int	iwn5000_send_calibration(struct iwn_softc *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn5000_crystal_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calibv2(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static void	iwn_radio_on(void *, int);
static void	iwn_radio_off(void *, int);
static void	iwn_init_locked(struct iwn_softc *);
static void	iwn_init(void *);
static void	iwn_stop_locked(struct iwn_softc *);
static void	iwn_stop(struct iwn_softc *);
static void	iwn_scan_start(struct ieee80211com *);
static void	iwn_scan_end(struct ieee80211com *);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
static void	iwn_hw_reset(void *, int);
#ifdef IWN_DEBUG
static char	*iwn_get_csr_string(int);
static void	iwn_debug_register(struct iwn_softc *);
#endif

static device_method_t iwn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwn_probe),
	DEVMETHOD(device_attach,	iwn_attach),
	DEVMETHOD(device_detach,	iwn_detach),
	DEVMETHOD(device_shutdown,	iwn_shutdown),
	DEVMETHOD(device_suspend,	iwn_suspend),
	DEVMETHOD(device_resume,	iwn_resume),

	DEVMETHOD_END
};

static driver_t iwn_driver = {
	"iwn",
	iwn_methods,
	sizeof(struct iwn_softc)
};
static devclass_t iwn_devclass;

DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);

MODULE_VERSION(iwn, 1);

MODULE_DEPEND(iwn, firmware, 1, 1, 1);
MODULE_DEPEND(iwn, pci, 1, 1, 1);
MODULE_DEPEND(iwn, wlan, 1, 1, 1);

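/*
 * Match the PCI vendor/device id against iwn_ident_table; per-subdevice
 * configuration is done later, in iwn_config_specific().
 */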
static int
iwn_probe(device_t dev)
{
	const struct iwn_ident *ident;

	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
		if (pci_get_vendor(dev) == ident->vendor &&
		    pci_get_device(dev) == ident->device) {
			device_set_desc(dev, ident->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return ENXIO;
}

static int
iwn_attach(device_t dev)
{
	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
	struct ieee80211com *ic;
	struct ifnet *ifp;
	int i, error, rid;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	sc->sc_dev = dev;

#ifdef IWN_DEBUG
	error = resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
	if (error != 0)
		sc->sc_debug = 0;
#else
	sc->sc_debug = 0;
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return error;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "can't map mem space\n");
		error = ENOMEM;
		return error;
	}
	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	i = 1;
	rid = 0;
	if (pci_alloc_msi(dev, &i) == 0)
		rid = 1;
	/* Install interrupt handler. */
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		error = ENOMEM;
		goto fail;
	}

	IWN_LOCK_INIT(sc);

	/* Read hardware revision and attach. */
	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
	    & IWN_HW_REV_TYPE_MASK;
	sc->subdevice_id = pci_get_subdevice(dev);

	/*
	 * 4965 versus 5000 and later have different methods.
	 * Let's set those up first.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		error = iwn4965_attach(sc, pci_get_device(dev));
	else
		error = iwn5000_attach(sc, pci_get_device(dev));
	if (error != 0) {
		device_printf(dev, "could not attach device, error %d\n",
		    error);
		goto fail;
	}

	/*
	 * Next, let's setup the various parameters of each NIC.
	 */
	error = iwn_config_specific(sc, pci_get_device(dev));
	if (error != 0) {
		device_printf(dev, "could not attach device, error %d\n",
		    error);
		goto fail;
	}

	if ((error = iwn_hw_prepare(sc)) != 0) {
		device_printf(dev, "hardware not ready, error %d\n", error);
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwn_alloc_fwmem(sc)) != 0) {
		device_printf(dev,
		    "could not allocate memory for firmware, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwn_alloc_kw(sc)) != 0) {
		device_printf(dev,
		    "could not allocate keep warm page, error %d\n", error);
		goto fail;
	}

	/* Allocate ICT table for 5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    (error = iwn_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwn_alloc_sched(sc)) != 0) {
		device_printf(dev,
		    "could not allocate TX scheduler rings, error %d\n", error);
		goto fail;
	}

	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
	for (i = 0; i < sc->ntxqs; i++) {
		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d, error %d\n", i,
			    error);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring, error %d\n",
		    error);
		goto fail;
	}

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		goto fail;
	}

	ic = ifp->if_l2com;
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_MONITOR		/* monitor mode supported */
		| IEEE80211_C_BGSCAN		/* background scanning */
		| IEEE80211_C_TXPMGT		/* tx power management */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
#if 0
		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
#endif
		| IEEE80211_C_WME		/* WME */
		| IEEE80211_C_PMGT		/* Station-side power mgmt */
		;

	/* Read MAC address, channels, etc from EEPROM. */
	if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
		device_printf(dev, "could not read EEPROM, error %d\n",
		    error);
		goto fail;
	}

	/* Count the number of available chains. */
	sc->ntxchains =
	    ((sc->txchainmask >> 2) & 1) +
	    ((sc->txchainmask >> 1) & 1) +
	    ((sc->txchainmask >> 0) & 1);
	sc->nrxchains =
	    ((sc->rxchainmask >> 2) & 1) +
	    ((sc->rxchainmask >> 1) & 1) +
	    ((sc->rxchainmask >> 0) & 1);
	if (bootverbose) {
		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
		    macaddr, ":");
	}

	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		ic->ic_rxstream = sc->nrxchains;
		ic->ic_txstream = sc->ntxchains;

		/*
		 * The NICs we currently support cap out at 2x2 support
		 * separate from the chains being used.
		 *
		 * This is a total hack to work around that until some
		 * per-device method is implemented to return the
		 * actual stream support.
		 *
		 * XXX Note: the 5350 is a 3x3 device; so we shouldn't
		 * cap this! But, anything that touches rates in the
		 * driver needs to be audited first before 3x3 is enabled.
		 */
		if (ic->ic_rxstream > 2)
			ic->ic_rxstream = 2;
		if (ic->ic_txstream > 2)
			ic->ic_txstream = 2;

		ic->ic_htcaps =
			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
#ifdef notyet
			| IEEE80211_HTCAP_GREENFIELD
#if IWN_RBUF_SIZE == 8192
			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#endif
			/* s/w capabilities */
			| IEEE80211_HTC_HT		/* HT operation */
			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
#ifdef notyet
			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
#endif
			;
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwn_init;
	ifp->if_ioctl = iwn_ioctl;
	ifp->if_start = iwn_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ieee80211_ifattach(ic, macaddr);
	ic->ic_vap_create = iwn_vap_create;
	ic->ic_vap_delete = iwn_vap_delete;
	ic->ic_raw_xmit = iwn_raw_xmit;
	ic->ic_node_alloc = iwn_node_alloc;
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwn_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwn_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = iwn_ampdu_tx_stop;
	ic->ic_newassoc = iwn_newassoc;
	ic->ic_wme.wme_update = iwn_updateedca;
	ic->ic_update_mcast = iwn_update_mcast;
	ic->ic_scan_start = iwn_scan_start;
	ic->ic_scan_end = iwn_scan_end;
	ic->ic_set_channel = iwn_set_channel;
	ic->ic_scan_curchan = iwn_scan_curchan;
	ic->ic_scan_mindwell = iwn_scan_mindwell;
	ic->ic_setregdomain = iwn_setregdomain;

	iwn_radiotap_attach(sc);

	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);

	iwn_sysctlattach(sc);

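	/*
	 * The calibration and watchdog callouts above run under sc_mtx;
	 * the reinit and radio on/off handlers are dispatched through the
	 * net80211 taskqueue (see ieee80211_draintask() in iwn_detach()).
	 */
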
	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwn_intr, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt, error %d\n",
		    error);
		goto fail;
	}

	if (bootverbose)
		ieee80211_announce(ic);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
	return 0;
fail:
	iwn_detach(dev);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
	return error;
}

/*
 * Define specific configuration based on device id and subdevice id
 * pid : PCI device id
 */
static int
iwn_config_specific(struct iwn_softc *sc, uint16_t pid)
{

	switch (pid) {
	/* 4965 series */
	case IWN_DID_4965_1:
	case IWN_DID_4965_2:
	case IWN_DID_4965_3:
	case IWN_DID_4965_4:
		sc->base_params = &iwn4965_base_params;
		sc->limits = &iwn4965_sensitivity_limits;
		sc->fwname = "iwn4965fw";
		/* Override chains masks, ROM is known to be broken. */
		sc->txchainmask = IWN_ANT_AB;
		sc->rxchainmask = IWN_ANT_ABC;
		/* Enable normal btcoex */
		sc->sc_flags |= IWN_FLAG_BTCOEX;
		break;
	/* 1000 Series */
	case IWN_DID_1000_1:
	case IWN_DID_1000_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_1000_1:
			case IWN_SDID_1000_2:
			case IWN_SDID_1000_3:
			case IWN_SDID_1000_4:
			case IWN_SDID_1000_5:
			case IWN_SDID_1000_6:
			case IWN_SDID_1000_7:
			case IWN_SDID_1000_8:
			case IWN_SDID_1000_9:
			case IWN_SDID_1000_10:
			case IWN_SDID_1000_11:
			case IWN_SDID_1000_12:
				sc->limits = &iwn1000_sensitivity_limits;
				sc->base_params = &iwn1000_base_params;
				sc->fwname = "iwn1000fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 6x00 Series */
	case IWN_DID_6x00_2:
	case IWN_DID_6x00_4:
	case IWN_DID_6x00_1:
	case IWN_DID_6x00_3:
		sc->fwname = "iwn6000fw";
		sc->limits = &iwn6000_sensitivity_limits;
		switch(sc->subdevice_id) {
			case IWN_SDID_6x00_1:
			case IWN_SDID_6x00_2:
			case IWN_SDID_6x00_8:
				//iwl6000_3agn_cfg
				sc->base_params = &iwn_6000_base_params;
				break;
			case IWN_SDID_6x00_3:
			case IWN_SDID_6x00_6:
			case IWN_SDID_6x00_9:
				////iwl6000i_2agn
			case IWN_SDID_6x00_4:
			case IWN_SDID_6x00_7:
			case IWN_SDID_6x00_10:
				//iwl6000i_2abg_cfg
			case IWN_SDID_6x00_5:
				//iwl6000i_2bg_cfg
				sc->base_params = &iwn_6000i_base_params;
				sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
				sc->txchainmask = IWN_ANT_BC;
				sc->rxchainmask = IWN_ANT_BC;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 6x05 Series */
	case IWN_DID_6x05_1:
	case IWN_DID_6x05_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6x05_1:
			case IWN_SDID_6x05_4:
			case IWN_SDID_6x05_6:
				//iwl6005_2agn_cfg
			case IWN_SDID_6x05_2:
			case IWN_SDID_6x05_5:
			case IWN_SDID_6x05_7:
				//iwl6005_2abg_cfg
			case IWN_SDID_6x05_3:
				//iwl6005_2bg_cfg
			case IWN_SDID_6x05_8:
			case IWN_SDID_6x05_9:
				//iwl6005_2agn_sff_cfg
			case IWN_SDID_6x05_10:
				//iwl6005_2agn_d_cfg
			case IWN_SDID_6x05_11:
				//iwl6005_2agn_mow1_cfg
			case IWN_SDID_6x05_12:
				//iwl6005_2agn_mow2_cfg
				sc->fwname = "iwn6000g2afw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6000g2_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 6x35 Series */
	case IWN_DID_6035_1:
	case IWN_DID_6035_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6035_1:
			case IWN_SDID_6035_2:
			case IWN_SDID_6035_3:
			case IWN_SDID_6035_4:
				sc->fwname = "iwn6000g2bfw";
				sc->limits = &iwn6235_sensitivity_limits;
				sc->base_params = &iwn_6235_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 6x50 WiFi/WiMax Series */
	case IWN_DID_6050_1:
	case IWN_DID_6050_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6050_1:
			case IWN_SDID_6050_3:
			case IWN_SDID_6050_5:
				//iwl6050_2agn_cfg
			case IWN_SDID_6050_2:
			case IWN_SDID_6050_4:
			case IWN_SDID_6050_6:
				//iwl6050_2abg_cfg
				sc->fwname = "iwn6050fw";
				sc->txchainmask = IWN_ANT_AB;
				sc->rxchainmask = IWN_ANT_AB;
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6050_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 6150 WiFi/WiMax Series */
	case IWN_DID_6150_1:
	case IWN_DID_6150_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6150_1:
			case IWN_SDID_6150_3:
			case IWN_SDID_6150_5:
				// iwl6150_bgn_cfg
			case IWN_SDID_6150_2:
			case IWN_SDID_6150_4:
			case IWN_SDID_6150_6:
				//iwl6150_bg_cfg
				sc->fwname = "iwn6050fw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6150_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 6030 Series and 1030 Series */
	case IWN_DID_x030_1:
	case IWN_DID_x030_2:
	case IWN_DID_x030_3:
	case IWN_DID_x030_4:
		switch(sc->subdevice_id) {
			case IWN_SDID_x030_1:
			case IWN_SDID_x030_3:
			case IWN_SDID_x030_5:
				// iwl1030_bgn_cfg
			case IWN_SDID_x030_2:
			case IWN_SDID_x030_4:
			case IWN_SDID_x030_6:
				//iwl1030_bg_cfg
			case IWN_SDID_x030_7:
			case IWN_SDID_x030_10:
			case IWN_SDID_x030_14:
				//iwl6030_2agn_cfg
			case IWN_SDID_x030_8:
			case IWN_SDID_x030_11:
			case IWN_SDID_x030_15:
				// iwl6030_2bgn_cfg
			case IWN_SDID_x030_9:
			case IWN_SDID_x030_12:
			case IWN_SDID_x030_16:
				// iwl6030_2abg_cfg
			case IWN_SDID_x030_13:
				//iwl6030_2bg_cfg
				sc->fwname = "iwn6000g2bfw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6000g2b_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 130 Series WiFi */
	/* XXX: This series will need adjustment for rate.
	 * see rx_with_siso_diversity in linux kernel
	 */
	case IWN_DID_130_1:
	case IWN_DID_130_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_130_1:
			case IWN_SDID_130_3:
			case IWN_SDID_130_5:
				//iwl130_bgn_cfg
			case IWN_SDID_130_2:
			case IWN_SDID_130_4:
			case IWN_SDID_130_6:
				//iwl130_bg_cfg
				sc->fwname = "iwn6000g2bfw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6000g2b_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 100 Series WiFi */
	case IWN_DID_100_1:
	case IWN_DID_100_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_100_1:
			case IWN_SDID_100_2:
			case IWN_SDID_100_3:
			case IWN_SDID_100_4:
			case IWN_SDID_100_5:
			case IWN_SDID_100_6:
				sc->limits = &iwn1000_sensitivity_limits;
				sc->base_params = &iwn1000_base_params;
				sc->fwname = "iwn100fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;

	/* 135 Series */
	/* XXX: This series will need adjustment for rate.
	 * see rx_with_siso_diversity in linux kernel
	 */
	case IWN_DID_135_1:
	case IWN_DID_135_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_135_1:
			case IWN_SDID_135_2:
			case IWN_SDID_135_3:
				sc->limits = &iwn2030_sensitivity_limits;
				sc->base_params = &iwn2030_base_params;
				sc->fwname = "iwn135fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;

	/* 2x00 Series */
	case IWN_DID_2x00_1:
	case IWN_DID_2x00_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_2x00_1:
			case IWN_SDID_2x00_2:
			case IWN_SDID_2x00_3:
				//iwl2000_2bgn_cfg
			case IWN_SDID_2x00_4:
				//iwl2000_2bgn_d_cfg
				sc->limits = &iwn2030_sensitivity_limits;
				sc->base_params = &iwn2000_base_params;
				sc->fwname = "iwn2000fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice) \n",
				    pid, sc->subdevice_id, sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 2x30 Series */
	case IWN_DID_2x30_1:
	case IWN_DID_2x30_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_2x30_1:
			case IWN_SDID_2x30_3:
			case IWN_SDID_2x30_5:
				//iwl100_bgn_cfg
			case IWN_SDID_2x30_2:
			case IWN_SDID_2x30_4:
			case IWN_SDID_2x30_6:
				//iwl100_bg_cfg
				sc->limits = &iwn2030_sensitivity_limits;
				sc->base_params = &iwn2030_base_params;
				sc->fwname = "iwn2030fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 5x00 Series */
	case IWN_DID_5x00_1:
	case IWN_DID_5x00_2:
	case IWN_DID_5x00_3:
	case IWN_DID_5x00_4:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->base_params = &iwn5000_base_params;
		sc->fwname = "iwn5000fw";
		switch(sc->subdevice_id) {
			case IWN_SDID_5x00_1:
			case IWN_SDID_5x00_2:
			case IWN_SDID_5x00_3:
			case IWN_SDID_5x00_4:
			case IWN_SDID_5x00_9:
			case IWN_SDID_5x00_10:
			case IWN_SDID_5x00_11:
			case IWN_SDID_5x00_12:
			case IWN_SDID_5x00_17:
			case IWN_SDID_5x00_18:
			case IWN_SDID_5x00_19:
			case IWN_SDID_5x00_20:
				//iwl5100_agn_cfg
				sc->txchainmask = IWN_ANT_B;
				sc->rxchainmask = IWN_ANT_AB;
				break;
			case IWN_SDID_5x00_5:
			case IWN_SDID_5x00_6:
			case IWN_SDID_5x00_13:
			case IWN_SDID_5x00_14:
			case IWN_SDID_5x00_21:
			case IWN_SDID_5x00_22:
				//iwl5100_bgn_cfg
				sc->txchainmask = IWN_ANT_B;
				sc->rxchainmask = IWN_ANT_AB;
				break;
			case IWN_SDID_5x00_7:
			case IWN_SDID_5x00_8:
			case IWN_SDID_5x00_15:
			case IWN_SDID_5x00_16:
			case IWN_SDID_5x00_23:
			case IWN_SDID_5x00_24:
				//iwl5100_abg_cfg
				sc->txchainmask = IWN_ANT_B;
				sc->rxchainmask = IWN_ANT_AB;
				break;
			case IWN_SDID_5x00_25:
			case IWN_SDID_5x00_26:
			case IWN_SDID_5x00_27:
			case IWN_SDID_5x00_28:
			case IWN_SDID_5x00_29:
			case IWN_SDID_5x00_30:
			case IWN_SDID_5x00_31:
			case IWN_SDID_5x00_32:
			case IWN_SDID_5x00_33:
			case IWN_SDID_5x00_34:
			case IWN_SDID_5x00_35:
			case IWN_SDID_5x00_36:
				//iwl5300_agn_cfg
				sc->txchainmask = IWN_ANT_ABC;
				sc->rxchainmask = IWN_ANT_ABC;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	/* 5x50 Series */
	case IWN_DID_5x50_1:
	case IWN_DID_5x50_2:
	case IWN_DID_5x50_3:
	case IWN_DID_5x50_4:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->base_params = &iwn5000_base_params;
		sc->fwname = "iwn5000fw";
		switch(sc->subdevice_id) {
			case IWN_SDID_5x50_1:
			case IWN_SDID_5x50_2:
			case IWN_SDID_5x50_3:
				//iwl5350_agn_cfg
				sc->limits = &iwn5000_sensitivity_limits;
				sc->base_params = &iwn5000_base_params;
				sc->fwname = "iwn5000fw";
				break;
			case IWN_SDID_5x50_4:
			case IWN_SDID_5x50_5:
			case IWN_SDID_5x50_8:
			case IWN_SDID_5x50_9:
			case IWN_SDID_5x50_10:
			case IWN_SDID_5x50_11:
				//iwl5150_agn_cfg
			case IWN_SDID_5x50_6:
			case IWN_SDID_5x50_7:
			case IWN_SDID_5x50_12:
			case IWN_SDID_5x50_13:
				//iwl5150_abg_cfg
				sc->limits = &iwn5000_sensitivity_limits;
				sc->fwname = "iwn5150fw";
				sc->base_params = &iwn_5x50_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	default:
		device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x"
		    "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id,
		    sc->hw_type);
		return ENOTSUP;
	}
	return 0;
}

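/*
 * Generation-specific attach for 4965 adapters: install the 4965 ops
 * vector and the hardware constants (queue and DMA channel counts,
 * broadcast node id, RXON/scheduler/firmware segment sizes).
 */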
static int
iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwn4965fw";
	/* Override chains masks, ROM is known to be broken. */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;
	/* Enable normal btcoex */
	sc->sc_flags |= IWN_FLAG_BTCOEX;

	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);

	return 0;
}

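/*
 * Same for the 5000 Series and later.  Sensitivity limits and the
 * firmware image are not set here; iwn_config_specific() selects them
 * per subdevice.
 */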
static int
iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;

	return 0;
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWN_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWN_RX_RADIOTAP_PRESENT);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

static void
iwn_sysctlattach(struct iwn_softc *sc)
{
#ifdef IWN_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
	    "control debugging printfs");
#endif
}

static struct ieee80211vap *
iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwn_vap *ivp;
	struct ieee80211vap *vap;
	uint8_t mac1[IEEE80211_ADDR_LEN];
	struct iwn_softc *sc = ic->ic_ifp->if_softc;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;

	IEEE80211_ADDR_COPY(mac1, mac);

	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (ivp == NULL)
		return NULL;
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
	ivp->ctx = IWN_RXON_BSS_CTX;
	IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwn_newstate;
	sc->ivap[IWN_RXON_BSS_CTX] = vap;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
	ic->ic_opmode = opmode;
	return vap;
}

static void
iwn_vap_delete(struct ieee80211vap *vap)
{
	struct iwn_vap *ivp = IWN_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

static int
iwn_detach(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (ifp != NULL) {
		ic = ifp->if_l2com;

		ieee80211_draintask(ic, &sc->sc_reinit_task);
		ieee80211_draintask(ic, &sc->sc_radioon_task);
		ieee80211_draintask(ic, &sc->sc_radiooff_task);

		iwn_stop(sc);
		callout_drain(&sc->watchdog_to);
		callout_drain(&sc->calib_to);
		ieee80211_ifdetach(ic);
	}

	/* Uninstall interrupt handler. */
	if (sc->irq != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
		    sc->irq);
		pci_release_msi(dev);
	}

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem), sc->mem);

	if (ifp != NULL)
		if_free(ifp);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
	IWN_LOCK_DESTROY(sc);
	return 0;
}

static int
iwn_shutdown(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	iwn_stop(sc);
	return 0;
}

static int
iwn_suspend(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	ieee80211_suspend_all(ic);
	return 0;
}

static int
iwn_resume(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	ieee80211_resume_all(ic);
	return 0;
}

static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

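/*
 * 16-bit variant of iwn_mem_write(): read the enclosing aligned 32-bit
 * word, merge in the new half-word (upper half if addr & 3 is set,
 * lower half otherwise) and write it back.
 */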
static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}

static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->base_params->shadow_ram_support) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (! sc->base_params->shadow_ram_support) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = prev = 0;
		for (count = 0; count < sc->base_params->max_ll_items;
		    count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		if (count == 0 || count == sc->base_params->max_ll_items)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

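/*
 * Read `count' bytes of ROM data starting at offset `addr' (relative to
 * sc->prom_base, advanced by one unit per two bytes read) through the
 * IWN_EEPROM register.  On OTP parts an uncorrectable ECC error aborts
 * the transfer; correctable errors are simply acknowledged.
 */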
static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			device_printf(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				device_printf(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit. */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

static void
iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}

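/*
 * The helpers below allocate the fixed per-device DMA areas through
 * iwn_dma_contig_alloc() with the alignment each one requires: the TX
 * scheduler (1KB), the "keep warm" page (4KB), the ICT table (4KB) and
 * the firmware staging buffer (16 bytes).
 */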
static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
	    sc->schedsz, 1024);
}

static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
}

static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
	    IWN_ICT_SIZE, 4096);
}

static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
}

static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate RX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
	    sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate RX status DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

1804 */ 1805 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1806 struct iwn_rx_data *data = &ring->data[i]; 1807 bus_addr_t paddr; 1808 1809 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1810 if (error != 0) { 1811 device_printf(sc->sc_dev, 1812 "%s: could not create RX buf DMA map, error %d\n", 1813 __func__, error); 1814 goto fail; 1815 } 1816 1817 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1818 IWN_RBUF_SIZE); 1819 if (data->m == NULL) { 1820 device_printf(sc->sc_dev, 1821 "%s: could not allocate RX mbuf\n", __func__); 1822 error = ENOBUFS; 1823 goto fail; 1824 } 1825 1826 error = bus_dmamap_load(ring->data_dmat, data->map, 1827 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1828 &paddr, BUS_DMA_NOWAIT); 1829 if (error != 0 && error != EFBIG) { 1830 device_printf(sc->sc_dev, 1831 "%s: could not map mbuf, error %d\n", __func__, 1832 error); 1833 goto fail; 1834 } 1835 1836 /* Set physical address of RX buffer (256-byte aligned). */ 1837 ring->desc[i] = htole32(paddr >> 8); 1838 } 1839 1840 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1841 BUS_DMASYNC_PREWRITE); 1842 1843 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1844 1845 return 0; 1846 1847 fail: iwn_free_rx_ring(sc, ring); 1848 1849 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1850 1851 return error; 1852 } 1853 1854 static void 1855 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1856 { 1857 int ntries; 1858 1859 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1860 1861 if (iwn_nic_lock(sc) == 0) { 1862 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1863 for (ntries = 0; ntries < 1000; ntries++) { 1864 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1865 IWN_FH_RX_STATUS_IDLE) 1866 break; 1867 DELAY(10); 1868 } 1869 iwn_nic_unlock(sc); 1870 } 1871 ring->cur = 0; 1872 sc->last_rx_valid = 0; 1873 } 1874 1875 static void 1876 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1877 { 1878 int i; 1879 1880 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1881 1882 iwn_dma_contig_free(&ring->desc_dma); 1883 iwn_dma_contig_free(&ring->stat_dma); 1884 1885 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1886 struct iwn_rx_data *data = &ring->data[i]; 1887 1888 if (data->m != NULL) { 1889 bus_dmamap_sync(ring->data_dmat, data->map, 1890 BUS_DMASYNC_POSTREAD); 1891 bus_dmamap_unload(ring->data_dmat, data->map); 1892 m_freem(data->m); 1893 data->m = NULL; 1894 } 1895 if (data->map != NULL) 1896 bus_dmamap_destroy(ring->data_dmat, data->map); 1897 } 1898 if (ring->data_dmat != NULL) { 1899 bus_dma_tag_destroy(ring->data_dmat); 1900 ring->data_dmat = NULL; 1901 } 1902 } 1903 1904 static int 1905 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1906 { 1907 bus_addr_t paddr; 1908 bus_size_t size; 1909 int i, error; 1910 1911 ring->qid = qid; 1912 ring->queued = 0; 1913 ring->cur = 0; 1914 1915 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1916 1917 /* Allocate TX descriptors (256-byte aligned).
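   Each TX ring also carries a parallel array of firmware command buffers
   (ring->cmd, allocated below); the per-slot cmd_paddr and scratch_paddr
   values point at a slot's command and at the scratch field 12 bytes into
   it, so later TX setup does not have to recompute them.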
*/ 1918 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1919 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1920 size, 256); 1921 if (error != 0) { 1922 device_printf(sc->sc_dev, 1923 "%s: could not allocate TX ring DMA memory, error %d\n", 1924 __func__, error); 1925 goto fail; 1926 } 1927 1928 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1929 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1930 size, 4); 1931 if (error != 0) { 1932 device_printf(sc->sc_dev, 1933 "%s: could not allocate TX cmd DMA memory, error %d\n", 1934 __func__, error); 1935 goto fail; 1936 } 1937 1938 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1939 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1940 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1941 &ring->data_dmat); 1942 if (error != 0) { 1943 device_printf(sc->sc_dev, 1944 "%s: could not create TX buf DMA tag, error %d\n", 1945 __func__, error); 1946 goto fail; 1947 } 1948 1949 paddr = ring->cmd_dma.paddr; 1950 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1951 struct iwn_tx_data *data = &ring->data[i]; 1952 1953 data->cmd_paddr = paddr; 1954 data->scratch_paddr = paddr + 12; 1955 paddr += sizeof (struct iwn_tx_cmd); 1956 1957 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1958 if (error != 0) { 1959 device_printf(sc->sc_dev, 1960 "%s: could not create TX buf DMA map, error %d\n", 1961 __func__, error); 1962 goto fail; 1963 } 1964 } 1965 1966 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1967 1968 return 0; 1969 1970 fail: iwn_free_tx_ring(sc, ring); 1971 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 1972 return error; 1973 } 1974 1975 static void 1976 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1977 { 1978 int i; 1979 1980 DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__); 1981 1982 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1983 struct iwn_tx_data *data = &ring->data[i]; 1984 1985 if (data->m != NULL) { 1986 bus_dmamap_sync(ring->data_dmat, data->map, 1987 BUS_DMASYNC_POSTWRITE); 1988 bus_dmamap_unload(ring->data_dmat, data->map); 1989 m_freem(data->m); 1990 data->m = NULL; 1991 } 1992 } 1993 /* Clear TX descriptors. */ 1994 memset(ring->desc, 0, ring->desc_dma.size); 1995 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1996 BUS_DMASYNC_PREWRITE); 1997 sc->qfullmsk &= ~(1 << ring->qid); 1998 ring->queued = 0; 1999 ring->cur = 0; 2000 } 2001 2002 static void 2003 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 2004 { 2005 int i; 2006 2007 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 2008 2009 iwn_dma_contig_free(&ring->desc_dma); 2010 iwn_dma_contig_free(&ring->cmd_dma); 2011 2012 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 2013 struct iwn_tx_data *data = &ring->data[i]; 2014 2015 if (data->m != NULL) { 2016 bus_dmamap_sync(ring->data_dmat, data->map, 2017 BUS_DMASYNC_POSTWRITE); 2018 bus_dmamap_unload(ring->data_dmat, data->map); 2019 m_freem(data->m); 2020 } 2021 if (data->map != NULL) 2022 bus_dmamap_destroy(ring->data_dmat, data->map); 2023 } 2024 if (ring->data_dmat != NULL) { 2025 bus_dma_tag_destroy(ring->data_dmat); 2026 ring->data_dmat = NULL; 2027 } 2028 } 2029 2030 static void 2031 iwn5000_ict_reset(struct iwn_softc *sc) 2032 { 2033 /* Disable interrupts. */ 2034 IWN_WRITE(sc, IWN_INT_MASK, 0); 2035 2036 /* Reset ICT table. 
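   The ICT (interrupt cause table) is the IWN_ICT_SIZE-byte DMA area
   allocated in iwn_alloc_ict(): instead of reading interrupt status
   registers, iwn_intr() picks pending causes out of sc->ict[].  The
   table's bus address, shifted right by 12 bits, is programmed into
   IWN_DRAM_INT_TBL below.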
*/ 2037 memset(sc->ict, 0, IWN_ICT_SIZE); 2038 sc->ict_cur = 0; 2039 2040 /* Set physical address of ICT table (4KB aligned). */ 2041 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); 2042 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 2043 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 2044 2045 /* Enable periodic RX interrupt. */ 2046 sc->int_mask |= IWN_INT_RX_PERIODIC; 2047 /* Switch to ICT interrupt mode in driver. */ 2048 sc->sc_flags |= IWN_FLAG_USE_ICT; 2049 2050 /* Re-enable interrupts. */ 2051 IWN_WRITE(sc, IWN_INT, 0xffffffff); 2052 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2053 } 2054 2055 static int 2056 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 2057 { 2058 struct iwn_ops *ops = &sc->ops; 2059 uint16_t val; 2060 int error; 2061 2062 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2063 2064 /* Check whether adapter has an EEPROM or an OTPROM. */ 2065 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 2066 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 2067 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 2068 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", 2069 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); 2070 2071 /* Adapter has to be powered on for EEPROM access to work. */ 2072 if ((error = iwn_apm_init(sc)) != 0) { 2073 device_printf(sc->sc_dev, 2074 "%s: could not power ON adapter, error %d\n", __func__, 2075 error); 2076 return error; 2077 } 2078 2079 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 2080 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); 2081 return EIO; 2082 } 2083 if ((error = iwn_eeprom_lock(sc)) != 0) { 2084 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n", 2085 __func__, error); 2086 return error; 2087 } 2088 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 2089 if ((error = iwn_init_otprom(sc)) != 0) { 2090 device_printf(sc->sc_dev, 2091 "%s: could not initialize OTPROM, error %d\n", 2092 __func__, error); 2093 return error; 2094 } 2095 } 2096 2097 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); 2098 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val)); 2099 /* Check if HT support is bonded out. */ 2100 if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) 2101 sc->sc_flags |= IWN_FLAG_HAS_11N; 2102 2103 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 2104 sc->rfcfg = le16toh(val); 2105 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); 2106 /* Read Tx/Rx chains from ROM unless it's known to be broken. */ 2107 if (sc->txchainmask == 0) 2108 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); 2109 if (sc->rxchainmask == 0) 2110 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); 2111 2112 /* Read MAC address. */ 2113 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); 2114 2115 /* Read adapter-specific information from EEPROM. */ 2116 ops->read_eeprom(sc); 2117 2118 iwn_apm_stop(sc); /* Power OFF adapter. */ 2119 2120 iwn_eeprom_unlock(sc); 2121 2122 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2123 2124 return 0; 2125 } 2126 2127 static void 2128 iwn4965_read_eeprom(struct iwn_softc *sc) 2129 { 2130 uint32_t addr; 2131 uint16_t val; 2132 int i; 2133 2134 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2135 2136 /* Read regulatory domain (4 ASCII characters). */ 2137 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 2138 2139 /* Read the list of authorized channels (20MHz ones only). 
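   iwn4965_regulatory_bands[] gives the EEPROM offset of each band's
   channel list; iwn_read_eeprom_channels() copies the per-channel
   flags/maxpwr entries and converts them to net80211 channels (entries
   0-4 are the plain 20MHz lists, the remaining ones describe HT40
   channel pairs).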
*/ 2140 for (i = 0; i < IWN_NBANDS - 1; i++) { 2141 addr = iwn4965_regulatory_bands[i]; 2142 iwn_read_eeprom_channels(sc, i, addr); 2143 } 2144 2145 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 2146 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 2147 sc->maxpwr2GHz = val & 0xff; 2148 sc->maxpwr5GHz = val >> 8; 2149 /* Check that EEPROM values are within valid range. */ 2150 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 2151 sc->maxpwr5GHz = 38; 2152 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 2153 sc->maxpwr2GHz = 38; 2154 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", 2155 sc->maxpwr2GHz, sc->maxpwr5GHz); 2156 2157 /* Read samples for each TX power group. */ 2158 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 2159 sizeof sc->bands); 2160 2161 /* Read voltage at which samples were taken. */ 2162 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 2163 sc->eeprom_voltage = (int16_t)le16toh(val); 2164 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", 2165 sc->eeprom_voltage); 2166 2167 #ifdef IWN_DEBUG 2168 /* Print samples. */ 2169 if (sc->sc_debug & IWN_DEBUG_ANY) { 2170 for (i = 0; i < IWN_NBANDS - 1; i++) 2171 iwn4965_print_power_group(sc, i); 2172 } 2173 #endif 2174 2175 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2176 } 2177 2178 #ifdef IWN_DEBUG 2179 static void 2180 iwn4965_print_power_group(struct iwn_softc *sc, int i) 2181 { 2182 struct iwn4965_eeprom_band *band = &sc->bands[i]; 2183 struct iwn4965_eeprom_chan_samples *chans = band->chans; 2184 int j, c; 2185 2186 printf("===band %d===\n", i); 2187 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 2188 printf("chan1 num=%d\n", chans[0].num); 2189 for (c = 0; c < 2; c++) { 2190 for (j = 0; j < IWN_NSAMPLES; j++) { 2191 printf("chain %d, sample %d: temp=%d gain=%d " 2192 "power=%d pa_det=%d\n", c, j, 2193 chans[0].samples[c][j].temp, 2194 chans[0].samples[c][j].gain, 2195 chans[0].samples[c][j].power, 2196 chans[0].samples[c][j].pa_det); 2197 } 2198 } 2199 printf("chan2 num=%d\n", chans[1].num); 2200 for (c = 0; c < 2; c++) { 2201 for (j = 0; j < IWN_NSAMPLES; j++) { 2202 printf("chain %d, sample %d: temp=%d gain=%d " 2203 "power=%d pa_det=%d\n", c, j, 2204 chans[1].samples[c][j].temp, 2205 chans[1].samples[c][j].gain, 2206 chans[1].samples[c][j].power, 2207 chans[1].samples[c][j].pa_det); 2208 } 2209 } 2210 } 2211 #endif 2212 2213 static void 2214 iwn5000_read_eeprom(struct iwn_softc *sc) 2215 { 2216 struct iwn5000_eeprom_calib_hdr hdr; 2217 int32_t volt; 2218 uint32_t base, addr; 2219 uint16_t val; 2220 int i; 2221 2222 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2223 2224 /* Read regulatory domain (4 ASCII characters). */ 2225 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 2226 base = le16toh(val); 2227 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 2228 sc->eeprom_domain, 4); 2229 2230 /* Read the list of authorized channels (20MHz ones only). */ 2231 for (i = 0; i < IWN_NBANDS - 1; i++) { 2232 addr = base + sc->base_params->regulatory_bands[i]; 2233 iwn_read_eeprom_channels(sc, i, addr); 2234 } 2235 2236 /* Read enhanced TX power information for 6000 Series. 
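   These entries, parsed in iwn_read_eeprom_enhinfo() below, hold
   per-channel/per-mode TX power limits in half-dBm units, which is why
   that routine stores maxpwr / 2 as the regulatory limit.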
*/ 2237 if (sc->base_params->enhanced_TX_power) 2238 iwn_read_eeprom_enhinfo(sc); 2239 2240 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 2241 base = le16toh(val); 2242 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 2243 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 2244 "%s: calib version=%u pa type=%u voltage=%u\n", __func__, 2245 hdr.version, hdr.pa_type, le16toh(hdr.volt)); 2246 sc->calib_ver = hdr.version; 2247 2248 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { 2249 sc->eeprom_voltage = le16toh(hdr.volt); 2250 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 2251 sc->eeprom_temp_high=le16toh(val); 2252 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 2253 sc->eeprom_temp = le16toh(val); 2254 } 2255 2256 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 2257 /* Compute temperature offset. */ 2258 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 2259 sc->eeprom_temp = le16toh(val); 2260 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 2261 volt = le16toh(val); 2262 sc->temp_off = sc->eeprom_temp - (volt / -5); 2263 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", 2264 sc->eeprom_temp, volt, sc->temp_off); 2265 } else { 2266 /* Read crystal calibration. */ 2267 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 2268 &sc->eeprom_crystal, sizeof (uint32_t)); 2269 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n", 2270 le32toh(sc->eeprom_crystal)); 2271 } 2272 2273 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2274 2275 } 2276 2277 /* 2278 * Translate EEPROM flags to net80211. 2279 */ 2280 static uint32_t 2281 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) 2282 { 2283 uint32_t nflags; 2284 2285 nflags = 0; 2286 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 2287 nflags |= IEEE80211_CHAN_PASSIVE; 2288 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) 2289 nflags |= IEEE80211_CHAN_NOADHOC; 2290 if (channel->flags & IWN_EEPROM_CHAN_RADAR) { 2291 nflags |= IEEE80211_CHAN_DFS; 2292 /* XXX apparently IBSS may still be marked */ 2293 nflags |= IEEE80211_CHAN_NOADHOC; 2294 } 2295 2296 return nflags; 2297 } 2298 2299 static void 2300 iwn_read_eeprom_band(struct iwn_softc *sc, int n) 2301 { 2302 struct ifnet *ifp = sc->sc_ifp; 2303 struct ieee80211com *ic = ifp->if_l2com; 2304 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 2305 const struct iwn_chan_band *band = &iwn_bands[n]; 2306 struct ieee80211_channel *c; 2307 uint8_t chan; 2308 int i, nflags; 2309 2310 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2311 2312 for (i = 0; i < band->nchan; i++) { 2313 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 2314 DPRINTF(sc, IWN_DEBUG_RESET, 2315 "skip chan %d flags 0x%x maxpwr %d\n", 2316 band->chan[i], channels[i].flags, 2317 channels[i].maxpwr); 2318 continue; 2319 } 2320 chan = band->chan[i]; 2321 nflags = iwn_eeprom_channel_flags(&channels[i]); 2322 2323 c = &ic->ic_channels[ic->ic_nchans++]; 2324 c->ic_ieee = chan; 2325 c->ic_maxregpower = channels[i].maxpwr; 2326 c->ic_maxpower = 2*c->ic_maxregpower; 2327 2328 if (n == 0) { /* 2GHz band */ 2329 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G); 2330 /* G =>'s B is supported */ 2331 c->ic_flags = IEEE80211_CHAN_B | nflags; 2332 c = &ic->ic_channels[ic->ic_nchans++]; 2333 c[0] = c[-1]; 2334 c->ic_flags = IEEE80211_CHAN_G | nflags; 2335 } else { /* 5GHz band */ 2336 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A); 2337 c->ic_flags = IEEE80211_CHAN_A | nflags; 2338 } 2339 2340 /* Save maximum 
allowed TX power for this channel. */ 2341 sc->maxpwr[chan] = channels[i].maxpwr; 2342 2343 DPRINTF(sc, IWN_DEBUG_RESET, 2344 "add chan %d flags 0x%x maxpwr %d\n", chan, 2345 channels[i].flags, channels[i].maxpwr); 2346 2347 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 2348 /* add HT20, HT40 added separately */ 2349 c = &ic->ic_channels[ic->ic_nchans++]; 2350 c[0] = c[-1]; 2351 c->ic_flags |= IEEE80211_CHAN_HT20; 2352 } 2353 } 2354 2355 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2356 2357 } 2358 2359 static void 2360 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n) 2361 { 2362 struct ifnet *ifp = sc->sc_ifp; 2363 struct ieee80211com *ic = ifp->if_l2com; 2364 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 2365 const struct iwn_chan_band *band = &iwn_bands[n]; 2366 struct ieee80211_channel *c, *cent, *extc; 2367 uint8_t chan; 2368 int i, nflags; 2369 2370 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__); 2371 2372 if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) { 2373 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__); 2374 return; 2375 } 2376 2377 for (i = 0; i < band->nchan; i++) { 2378 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 2379 DPRINTF(sc, IWN_DEBUG_RESET, 2380 "skip chan %d flags 0x%x maxpwr %d\n", 2381 band->chan[i], channels[i].flags, 2382 channels[i].maxpwr); 2383 continue; 2384 } 2385 chan = band->chan[i]; 2386 nflags = iwn_eeprom_channel_flags(&channels[i]); 2387 2388 /* 2389 * Each entry defines an HT40 channel pair; find the 2390 * center channel, then the extension channel above. 2391 */ 2392 cent = ieee80211_find_channel_byieee(ic, chan, 2393 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A)); 2394 if (cent == NULL) { /* XXX shouldn't happen */ 2395 device_printf(sc->sc_dev, 2396 "%s: no entry for channel %d\n", __func__, chan); 2397 continue; 2398 } 2399 extc = ieee80211_find_channel(ic, cent->ic_freq+20, 2400 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A)); 2401 if (extc == NULL) { 2402 DPRINTF(sc, IWN_DEBUG_RESET, 2403 "%s: skip chan %d, extension channel not found\n", 2404 __func__, chan); 2405 continue; 2406 } 2407 2408 DPRINTF(sc, IWN_DEBUG_RESET, 2409 "add ht40 chan %d flags 0x%x maxpwr %d\n", 2410 chan, channels[i].flags, channels[i].maxpwr); 2411 2412 c = &ic->ic_channels[ic->ic_nchans++]; 2413 c[0] = cent[0]; 2414 c->ic_extieee = extc->ic_ieee; 2415 c->ic_flags &= ~IEEE80211_CHAN_HT; 2416 c->ic_flags |= IEEE80211_CHAN_HT40U | nflags; 2417 c = &ic->ic_channels[ic->ic_nchans++]; 2418 c[0] = extc[0]; 2419 c->ic_extieee = cent->ic_ieee; 2420 c->ic_flags &= ~IEEE80211_CHAN_HT; 2421 c->ic_flags |= IEEE80211_CHAN_HT40D | nflags; 2422 } 2423 2424 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2425 2426 } 2427 2428 static void 2429 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 2430 { 2431 struct ifnet *ifp = sc->sc_ifp; 2432 struct ieee80211com *ic = ifp->if_l2com; 2433 2434 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], 2435 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); 2436 2437 if (n < 5) 2438 iwn_read_eeprom_band(sc, n); 2439 else 2440 iwn_read_eeprom_ht40(sc, n); 2441 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 2442 } 2443 2444 static struct iwn_eeprom_chan * 2445 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) 2446 { 2447 int band, chan, i, j; 2448 2449 if (IEEE80211_IS_CHAN_HT40(c)) { 2450 band = IEEE80211_IS_CHAN_5GHZ(c) ? 
6 : 5; 2451 if (IEEE80211_IS_CHAN_HT40D(c)) 2452 chan = c->ic_extieee; 2453 else 2454 chan = c->ic_ieee; 2455 for (i = 0; i < iwn_bands[band].nchan; i++) { 2456 if (iwn_bands[band].chan[i] == chan) 2457 return &sc->eeprom_channels[band][i]; 2458 } 2459 } else { 2460 for (j = 0; j < 5; j++) { 2461 for (i = 0; i < iwn_bands[j].nchan; i++) { 2462 if (iwn_bands[j].chan[i] == c->ic_ieee) 2463 return &sc->eeprom_channels[j][i]; 2464 } 2465 } 2466 } 2467 return NULL; 2468 } 2469 2470 /* 2471 * Enforce flags read from EEPROM. 2472 */ 2473 static int 2474 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 2475 int nchan, struct ieee80211_channel chans[]) 2476 { 2477 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2478 int i; 2479 2480 for (i = 0; i < nchan; i++) { 2481 struct ieee80211_channel *c = &chans[i]; 2482 struct iwn_eeprom_chan *channel; 2483 2484 channel = iwn_find_eeprom_channel(sc, c); 2485 if (channel == NULL) { 2486 if_printf(ic->ic_ifp, 2487 "%s: invalid channel %u freq %u/0x%x\n", 2488 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 2489 return EINVAL; 2490 } 2491 c->ic_flags |= iwn_eeprom_channel_flags(channel); 2492 } 2493 2494 return 0; 2495 } 2496 2497 static void 2498 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 2499 { 2500 struct iwn_eeprom_enhinfo enhinfo[35]; 2501 struct ifnet *ifp = sc->sc_ifp; 2502 struct ieee80211com *ic = ifp->if_l2com; 2503 struct ieee80211_channel *c; 2504 uint16_t val, base; 2505 int8_t maxpwr; 2506 uint8_t flags; 2507 int i, j; 2508 2509 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2510 2511 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 2512 base = le16toh(val); 2513 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 2514 enhinfo, sizeof enhinfo); 2515 2516 for (i = 0; i < nitems(enhinfo); i++) { 2517 flags = enhinfo[i].flags; 2518 if (!(flags & IWN_ENHINFO_VALID)) 2519 continue; /* Skip invalid entries. 
*/ 2520 2521 maxpwr = 0; 2522 if (sc->txchainmask & IWN_ANT_A) 2523 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 2524 if (sc->txchainmask & IWN_ANT_B) 2525 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 2526 if (sc->txchainmask & IWN_ANT_C) 2527 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 2528 if (sc->ntxchains == 2) 2529 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 2530 else if (sc->ntxchains == 3) 2531 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 2532 2533 for (j = 0; j < ic->ic_nchans; j++) { 2534 c = &ic->ic_channels[j]; 2535 if ((flags & IWN_ENHINFO_5GHZ)) { 2536 if (!IEEE80211_IS_CHAN_A(c)) 2537 continue; 2538 } else if ((flags & IWN_ENHINFO_OFDM)) { 2539 if (!IEEE80211_IS_CHAN_G(c)) 2540 continue; 2541 } else if (!IEEE80211_IS_CHAN_B(c)) 2542 continue; 2543 if ((flags & IWN_ENHINFO_HT40)) { 2544 if (!IEEE80211_IS_CHAN_HT40(c)) 2545 continue; 2546 } else { 2547 if (IEEE80211_IS_CHAN_HT40(c)) 2548 continue; 2549 } 2550 if (enhinfo[i].chan != 0 && 2551 enhinfo[i].chan != c->ic_ieee) 2552 continue; 2553 2554 DPRINTF(sc, IWN_DEBUG_RESET, 2555 "channel %d(%x), maxpwr %d\n", c->ic_ieee, 2556 c->ic_flags, maxpwr / 2); 2557 c->ic_maxregpower = maxpwr / 2; 2558 c->ic_maxpower = maxpwr; 2559 } 2560 } 2561 2562 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2563 2564 } 2565 2566 static struct ieee80211_node * 2567 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2568 { 2569 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 2570 } 2571 2572 static __inline int 2573 rate2plcp(int rate) 2574 { 2575 switch (rate & 0xff) { 2576 case 12: return 0xd; 2577 case 18: return 0xf; 2578 case 24: return 0x5; 2579 case 36: return 0x7; 2580 case 48: return 0x9; 2581 case 72: return 0xb; 2582 case 96: return 0x1; 2583 case 108: return 0x3; 2584 case 2: return 10; 2585 case 4: return 20; 2586 case 11: return 55; 2587 case 22: return 110; 2588 } 2589 return 0; 2590 } 2591 2592 /* 2593 * Calculate the required PLCP value from the given rate, 2594 * to the given node. 2595 * 2596 * This will take the node configuration (eg 11n, rate table 2597 * setup, etc) into consideration. 2598 */ 2599 static uint32_t 2600 iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni, 2601 uint8_t rate) 2602 { 2603 #define RV(v) ((v) & IEEE80211_RATE_VAL) 2604 struct ieee80211com *ic = ni->ni_ic; 2605 uint8_t txant1, txant2; 2606 uint32_t plcp = 0; 2607 int ridx; 2608 2609 /* Use the first valid TX antenna. */ 2610 txant1 = IWN_LSB(sc->txchainmask); 2611 txant2 = IWN_LSB(sc->txchainmask & ~txant1); 2612 2613 /* 2614 * If it's an MCS rate, let's set the plcp correctly 2615 * and set the relevant flags based on the node config. 2616 */ 2617 if (rate & IEEE80211_RATE_MCS) { 2618 /* 2619 * Set the initial PLCP value to be between 0->31 for 2620 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!" 2621 * flag. 2622 */ 2623 plcp = RV(rate) | IWN_RFLAG_MCS; 2624 2625 /* 2626 * XXX the following should only occur if both 2627 * the local configuration _and_ the remote node 2628 * advertise these capabilities. Thus this code 2629 * may need fixing! 2630 */ 2631 2632 /* 2633 * Set the channel width and guard interval. 2634 */ 2635 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 2636 plcp |= IWN_RFLAG_HT40; 2637 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) 2638 plcp |= IWN_RFLAG_SGI; 2639 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { 2640 plcp |= IWN_RFLAG_SGI; 2641 } 2642 2643 /* 2644 * If it's a two stream rate, enable TX on both 2645 * antennas. 2646 * 2647 * XXX three stream rates? 
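 *
 * (With the MCS flag set, 0x80-0x87 are the single stream rates
 * MCS 0-7 and 0x88-0x8f are the dual stream rates MCS 8-15, so the
 * "rate > 0x87" test below just means "more than one spatial stream".
 * Three stream rates, MCS 16-23, would need all three antennas and are
 * not handled here.)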
2648 */ 2649 if (rate > 0x87) 2650 plcp |= IWN_RFLAG_ANT(txant1 | txant2); 2651 else 2652 plcp |= IWN_RFLAG_ANT(txant1); 2653 } else { 2654 /* 2655 * Set the initial PLCP - fine for both 2656 * OFDM and CCK rates. 2657 */ 2658 plcp = rate2plcp(rate); 2659 2660 /* Set CCK flag if it's CCK */ 2661 2662 /* XXX It would be nice to have a method 2663 * to map the ridx -> phy table entry 2664 * so we could just query that, rather than 2665 * this hack to check against IWN_RIDX_OFDM6. 2666 */ 2667 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, 2668 rate & IEEE80211_RATE_VAL); 2669 if (ridx < IWN_RIDX_OFDM6 && 2670 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 2671 plcp |= IWN_RFLAG_CCK; 2672 2673 /* Set antenna configuration */ 2674 plcp |= IWN_RFLAG_ANT(txant1); 2675 } 2676 2677 DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n", 2678 __func__, 2679 rate, 2680 plcp); 2681 2682 return (htole32(plcp)); 2683 #undef RV 2684 } 2685 2686 static void 2687 iwn_newassoc(struct ieee80211_node *ni, int isnew) 2688 { 2689 /* Doesn't do anything at the moment */ 2690 } 2691 2692 static int 2693 iwn_media_change(struct ifnet *ifp) 2694 { 2695 int error; 2696 2697 error = ieee80211_media_change(ifp); 2698 /* NB: only the fixed rate can change and that doesn't need a reset */ 2699 return (error == ENETRESET ? 0 : error); 2700 } 2701 2702 static int 2703 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 2704 { 2705 struct iwn_vap *ivp = IWN_VAP(vap); 2706 struct ieee80211com *ic = vap->iv_ic; 2707 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2708 int error = 0; 2709 2710 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2711 2712 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 2713 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); 2714 2715 IEEE80211_UNLOCK(ic); 2716 IWN_LOCK(sc); 2717 callout_stop(&sc->calib_to); 2718 2719 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 2720 2721 switch (nstate) { 2722 case IEEE80211_S_ASSOC: 2723 if (vap->iv_state != IEEE80211_S_RUN) 2724 break; 2725 /* FALLTHROUGH */ 2726 case IEEE80211_S_AUTH: 2727 if (vap->iv_state == IEEE80211_S_AUTH) 2728 break; 2729 2730 /* 2731 * !AUTH -> AUTH transition requires state reset to handle 2732 * reassociations correctly. 2733 */ 2734 sc->rxon->associd = 0; 2735 sc->rxon->filter &= ~htole32(IWN_FILTER_BSS); 2736 sc->calib.state = IWN_CALIB_STATE_INIT; 2737 2738 if ((error = iwn_auth(sc, vap)) != 0) { 2739 device_printf(sc->sc_dev, 2740 "%s: could not move to auth state\n", __func__); 2741 } 2742 break; 2743 2744 case IEEE80211_S_RUN: 2745 /* 2746 * RUN -> RUN transition; Just restart the timers. 2747 */ 2748 if (vap->iv_state == IEEE80211_S_RUN) { 2749 sc->calib_cnt = 0; 2750 break; 2751 } 2752 2753 /* 2754 * !RUN -> RUN requires setting the association id 2755 * which is done with a firmware cmd. We also defer 2756 * starting the timers until that work is done. 
2757 */ 2758 if ((error = iwn_run(sc, vap)) != 0) { 2759 device_printf(sc->sc_dev, 2760 "%s: could not move to run state\n", __func__); 2761 } 2762 break; 2763 2764 case IEEE80211_S_INIT: 2765 sc->calib.state = IWN_CALIB_STATE_INIT; 2766 break; 2767 2768 default: 2769 break; 2770 } 2771 IWN_UNLOCK(sc); 2772 IEEE80211_LOCK(ic); 2773 if (error != 0){ 2774 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 2775 return error; 2776 } 2777 2778 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2779 2780 return ivp->iv_newstate(vap, nstate, arg); 2781 } 2782 2783 static void 2784 iwn_calib_timeout(void *arg) 2785 { 2786 struct iwn_softc *sc = arg; 2787 2788 IWN_LOCK_ASSERT(sc); 2789 2790 /* Force automatic TX power calibration every 60 secs. */ 2791 if (++sc->calib_cnt >= 120) { 2792 uint32_t flags = 0; 2793 2794 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2795 "sending request for statistics"); 2796 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2797 sizeof flags, 1); 2798 sc->calib_cnt = 0; 2799 } 2800 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 2801 sc); 2802 } 2803 2804 /* 2805 * Process an RX_PHY firmware notification. This is usually immediately 2806 * followed by an MPDU_RX_DONE notification. 2807 */ 2808 static void 2809 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2810 struct iwn_rx_data *data) 2811 { 2812 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2813 2814 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2815 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2816 2817 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2818 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2819 sc->last_rx_valid = 1; 2820 } 2821 2822 /* 2823 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2824 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 2825 */ 2826 static void 2827 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2828 struct iwn_rx_data *data) 2829 { 2830 struct iwn_ops *ops = &sc->ops; 2831 struct ifnet *ifp = sc->sc_ifp; 2832 struct ieee80211com *ic = ifp->if_l2com; 2833 struct iwn_rx_ring *ring = &sc->rxq; 2834 struct ieee80211_frame *wh; 2835 struct ieee80211_node *ni; 2836 struct mbuf *m, *m1; 2837 struct iwn_rx_stat *stat; 2838 caddr_t head; 2839 bus_addr_t paddr; 2840 uint32_t flags; 2841 int error, len, rssi, nf; 2842 2843 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2844 2845 if (desc->type == IWN_MPDU_RX_DONE) { 2846 /* Check for prior RX_PHY notification. */ 2847 if (!sc->last_rx_valid) { 2848 DPRINTF(sc, IWN_DEBUG_ANY, 2849 "%s: missing RX_PHY\n", __func__); 2850 return; 2851 } 2852 stat = &sc->last_rx_stat; 2853 } else 2854 stat = (struct iwn_rx_stat *)(desc + 1); 2855 2856 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2857 2858 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2859 device_printf(sc->sc_dev, 2860 "%s: invalid RX statistic header, len %d\n", __func__, 2861 stat->cfg_phy_len); 2862 return; 2863 } 2864 if (desc->type == IWN_MPDU_RX_DONE) { 2865 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2866 head = (caddr_t)(mpdu + 1); 2867 len = le16toh(mpdu->len); 2868 } else { 2869 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2870 len = le16toh(stat->len); 2871 } 2872 2873 flags = le32toh(*(uint32_t *)(head + len)); 2874 2875 /* Discard frames with a bad FCS early. 
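   The device appends a 32-bit status word right after the frame data;
   it was fetched above as "flags" from (head + len), and every bit in
   IWN_RX_NOERROR must be set for the frame to be usable.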
*/ 2876 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2877 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", 2878 __func__, flags); 2879 ifp->if_ierrors++; 2880 return; 2881 } 2882 /* Discard frames that are too short. */ 2883 if (len < sizeof (*wh)) { 2884 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2885 __func__, len); 2886 ifp->if_ierrors++; 2887 return; 2888 } 2889 2890 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 2891 if (m1 == NULL) { 2892 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2893 __func__); 2894 ifp->if_ierrors++; 2895 return; 2896 } 2897 bus_dmamap_unload(ring->data_dmat, data->map); 2898 2899 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 2900 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2901 if (error != 0 && error != EFBIG) { 2902 device_printf(sc->sc_dev, 2903 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2904 m_freem(m1); 2905 2906 /* Try to reload the old mbuf. */ 2907 error = bus_dmamap_load(ring->data_dmat, data->map, 2908 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 2909 &paddr, BUS_DMA_NOWAIT); 2910 if (error != 0 && error != EFBIG) { 2911 panic("%s: could not load old RX mbuf", __func__); 2912 } 2913 /* Physical address may have changed. */ 2914 ring->desc[ring->cur] = htole32(paddr >> 8); 2915 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2916 BUS_DMASYNC_PREWRITE); 2917 ifp->if_ierrors++; 2918 return; 2919 } 2920 2921 m = data->m; 2922 data->m = m1; 2923 /* Update RX descriptor. */ 2924 ring->desc[ring->cur] = htole32(paddr >> 8); 2925 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2926 BUS_DMASYNC_PREWRITE); 2927 2928 /* Finalize mbuf. */ 2929 m->m_pkthdr.rcvif = ifp; 2930 m->m_data = head; 2931 m->m_pkthdr.len = m->m_len = len; 2932 2933 /* Grab a reference to the source node. */ 2934 wh = mtod(m, struct ieee80211_frame *); 2935 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2936 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2937 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95; 2938 2939 rssi = ops->get_rssi(sc, stat); 2940 2941 if (ieee80211_radiotap_active(ic)) { 2942 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2943 2944 tap->wr_flags = 0; 2945 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2946 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2947 tap->wr_dbm_antsignal = (int8_t)rssi; 2948 tap->wr_dbm_antnoise = (int8_t)nf; 2949 tap->wr_tsft = stat->tstamp; 2950 switch (stat->rate) { 2951 /* CCK rates. */ 2952 case 10: tap->wr_rate = 2; break; 2953 case 20: tap->wr_rate = 4; break; 2954 case 55: tap->wr_rate = 11; break; 2955 case 110: tap->wr_rate = 22; break; 2956 /* OFDM rates. */ 2957 case 0xd: tap->wr_rate = 12; break; 2958 case 0xf: tap->wr_rate = 18; break; 2959 case 0x5: tap->wr_rate = 24; break; 2960 case 0x7: tap->wr_rate = 36; break; 2961 case 0x9: tap->wr_rate = 48; break; 2962 case 0xb: tap->wr_rate = 72; break; 2963 case 0x1: tap->wr_rate = 96; break; 2964 case 0x3: tap->wr_rate = 108; break; 2965 /* Unknown rate: should not happen. */ 2966 default: tap->wr_rate = 0; 2967 } 2968 } 2969 2970 IWN_UNLOCK(sc); 2971 2972 /* Send the frame to the 802.11 layer. */ 2973 if (ni != NULL) { 2974 if (ni->ni_flags & IEEE80211_NODE_HT) 2975 m->m_flags |= M_AMPDU; 2976 (void)ieee80211_input(ni, m, rssi - nf, nf); 2977 /* Node is no longer needed. 
*/ 2978 ieee80211_free_node(ni); 2979 } else 2980 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 2981 2982 IWN_LOCK(sc); 2983 2984 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2985 2986 } 2987 2988 /* Process an incoming Compressed BlockAck. */ 2989 static void 2990 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2991 struct iwn_rx_data *data) 2992 { 2993 struct iwn_ops *ops = &sc->ops; 2994 struct ifnet *ifp = sc->sc_ifp; 2995 struct iwn_node *wn; 2996 struct ieee80211_node *ni; 2997 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2998 struct iwn_tx_ring *txq; 2999 struct iwn_tx_data *txdata; 3000 struct ieee80211_tx_ampdu *tap; 3001 struct mbuf *m; 3002 uint64_t bitmap; 3003 uint16_t ssn; 3004 uint8_t tid; 3005 int ackfailcnt = 0, i, lastidx, qid, *res, shift; 3006 3007 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3008 3009 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3010 3011 qid = le16toh(ba->qid); 3012 txq = &sc->txq[qid]; 3013 tap = sc->qid2tap[qid]; 3014 tid = tap->txa_tid; 3015 wn = (void *)tap->txa_ni; 3016 3017 res = NULL; 3018 ssn = 0; 3019 if (!IEEE80211_AMPDU_RUNNING(tap)) { 3020 res = tap->txa_private; 3021 ssn = tap->txa_start & 0xfff; 3022 } 3023 3024 for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) { 3025 txdata = &txq->data[txq->read]; 3026 3027 /* Unmap and free mbuf. */ 3028 bus_dmamap_sync(txq->data_dmat, txdata->map, 3029 BUS_DMASYNC_POSTWRITE); 3030 bus_dmamap_unload(txq->data_dmat, txdata->map); 3031 m = txdata->m, txdata->m = NULL; 3032 ni = txdata->ni, txdata->ni = NULL; 3033 3034 KASSERT(ni != NULL, ("no node")); 3035 KASSERT(m != NULL, ("no mbuf")); 3036 3037 ieee80211_tx_complete(ni, m, 1); 3038 3039 txq->queued--; 3040 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; 3041 } 3042 3043 if (txq->queued == 0 && res != NULL) { 3044 iwn_nic_lock(sc); 3045 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3046 iwn_nic_unlock(sc); 3047 sc->qid2tap[qid] = NULL; 3048 free(res, M_DEVBUF); 3049 return; 3050 } 3051 3052 if (wn->agg[tid].bitmap == 0) 3053 return; 3054 3055 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 3056 if (shift < 0) 3057 shift += 0x100; 3058 3059 if (wn->agg[tid].nframes > (64 - shift)) 3060 return; 3061 3062 ni = tap->txa_ni; 3063 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; 3064 for (i = 0; bitmap; i++) { 3065 if ((bitmap & 1) == 0) { 3066 ifp->if_oerrors++; 3067 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3068 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3069 } else { 3070 ifp->if_opackets++; 3071 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3072 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3073 } 3074 bitmap >>= 1; 3075 } 3076 3077 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3078 3079 } 3080 3081 /* 3082 * Process a CALIBRATION_RESULT notification sent by the initialization 3083 * firmware in response to a CMD_CALIB_CONFIG command (5000 only). 3084 */ 3085 static void 3086 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3087 struct iwn_rx_data *data) 3088 { 3089 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 3090 int len, idx = -1; 3091 3092 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3093 3094 /* Runtime firmware should not send such a notification.
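   Calibration results only come from the initialization firmware; they
   are cached in sc->calibcmd[] below (one slot per calibration type this
   chip needs) so they can later be handed to the runtime firmware.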
*/ 3095 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) { 3096 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after calib done\n", 3097 __func__); 3098 return; 3099 } 3100 len = (le32toh(desc->len) & 0x3fff) - 4; 3101 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3102 3103 switch (calib->code) { 3104 case IWN5000_PHY_CALIB_DC: 3105 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC) 3106 idx = 0; 3107 break; 3108 case IWN5000_PHY_CALIB_LO: 3109 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO) 3110 idx = 1; 3111 break; 3112 case IWN5000_PHY_CALIB_TX_IQ: 3113 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ) 3114 idx = 2; 3115 break; 3116 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 3117 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC) 3118 idx = 3; 3119 break; 3120 case IWN5000_PHY_CALIB_BASE_BAND: 3121 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND) 3122 idx = 4; 3123 break; 3124 } 3125 if (idx == -1) /* Ignore other results. */ 3126 return; 3127 3128 /* Save calibration result. */ 3129 if (sc->calibcmd[idx].buf != NULL) 3130 free(sc->calibcmd[idx].buf, M_DEVBUF); 3131 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 3132 if (sc->calibcmd[idx].buf == NULL) { 3133 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3134 "not enough memory for calibration result %d\n", 3135 calib->code); 3136 return; 3137 } 3138 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3139 "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len); 3140 sc->calibcmd[idx].len = len; 3141 memcpy(sc->calibcmd[idx].buf, calib, len); 3142 } 3143 3144 static void 3145 iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib, 3146 struct iwn_stats *stats) 3147 { 3148 3149 /* XXX lock assert */ 3150 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats)); 3151 sc->last_stat_valid = 1; 3152 } 3153 3154 /* 3155 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 3156 * The latter is sent by the firmware after each received beacon. 3157 */ 3158 static void 3159 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3160 struct iwn_rx_data *data) 3161 { 3162 struct iwn_ops *ops = &sc->ops; 3163 struct ifnet *ifp = sc->sc_ifp; 3164 struct ieee80211com *ic = ifp->if_l2com; 3165 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3166 struct iwn_calib_state *calib = &sc->calib; 3167 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 3168 int temp; 3169 3170 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3171 3172 /* Ignore statistics received during a scan. */ 3173 if (vap->iv_state != IEEE80211_S_RUN || 3174 (ic->ic_flags & IEEE80211_F_SCAN)) { 3175 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s ignored, not running or scanning\n", 3176 __func__); 3177 return; 3178 } 3179 3180 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3181 3182 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n", 3183 __func__, desc->type); 3184 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 3185 3186 /* Collect/track general statistics for reporting */ 3187 iwn_stats_update(sc, calib, stats); 3188 3189 /* Test if temperature has changed. */ 3190 if (stats->general.temp != sc->rawtemp) { 3191 /* Convert "raw" temperature to degC. */ 3192 sc->rawtemp = stats->general.temp; 3193 temp = ops->get_temperature(sc); 3194 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 3195 __func__, temp); 3196 3197 /* Update TX power if need be (4965AGN only).
*/ 3198 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3199 iwn4965_power_calibration(sc, temp); 3200 } 3201 3202 if (desc->type != IWN_BEACON_STATISTICS) 3203 return; /* Reply to a statistics request. */ 3204 3205 sc->noise = iwn_get_noise(&stats->rx.general); 3206 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 3207 3208 /* Test that RSSI and noise are present in stats report. */ 3209 if (le32toh(stats->rx.general.flags) != 1) { 3210 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 3211 "received statistics without RSSI"); 3212 return; 3213 } 3214 3215 if (calib->state == IWN_CALIB_STATE_ASSOC) 3216 iwn_collect_noise(sc, &stats->rx.general); 3217 else if (calib->state == IWN_CALIB_STATE_RUN) { 3218 iwn_tune_sensitivity(sc, &stats->rx); 3219 /* 3220 * XXX TODO: Only run the RX recovery if we're associated! 3221 */ 3222 iwn_check_rx_recovery(sc, stats); 3223 iwn_save_stats_counters(sc, stats); 3224 } 3225 3226 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3227 } 3228 3229 /* 3230 * Save the relevant statistic counters for the next calibration 3231 * pass. 3232 */ 3233 static void 3234 iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs) 3235 { 3236 struct iwn_calib_state *calib = &sc->calib; 3237 3238 /* Save counters values for next call. */ 3239 calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp); 3240 calib->fa_cck = le32toh(rs->rx.cck.fa); 3241 calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp); 3242 calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp); 3243 calib->fa_ofdm = le32toh(rs->rx.ofdm.fa); 3244 3245 /* Last time we received these tick values */ 3246 sc->last_calib_ticks = ticks; 3247 } 3248 3249 /* 3250 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 3251 * and 5000 adapters have different incompatible TX status formats. 3252 */ 3253 static void 3254 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3255 struct iwn_rx_data *data) 3256 { 3257 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 3258 struct iwn_tx_ring *ring; 3259 int qid; 3260 3261 qid = desc->qid & 0xf; 3262 ring = &sc->txq[qid]; 3263 3264 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3265 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3266 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3267 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3268 le32toh(stat->status)); 3269 3270 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3271 if (qid >= sc->firstaggqueue) { 3272 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3273 &stat->status); 3274 } else { 3275 iwn_tx_done(sc, desc, stat->ackfailcnt, 3276 le32toh(stat->status) & 0xff); 3277 } 3278 } 3279 3280 static void 3281 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3282 struct iwn_rx_data *data) 3283 { 3284 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 3285 struct iwn_tx_ring *ring; 3286 int qid; 3287 3288 qid = desc->qid & 0xf; 3289 ring = &sc->txq[qid]; 3290 3291 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3292 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3293 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3294 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3295 le32toh(stat->status)); 3296 3297 #ifdef notyet 3298 /* Reset TX scheduler slot. 
*/ 3299 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 3300 #endif 3301 3302 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3303 if (qid >= sc->firstaggqueue) { 3304 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3305 &stat->status); 3306 } else { 3307 iwn_tx_done(sc, desc, stat->ackfailcnt, 3308 le16toh(stat->status) & 0xff); 3309 } 3310 } 3311 3312 /* 3313 * Adapter-independent backend for TX_DONE firmware notifications. 3314 */ 3315 static void 3316 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 3317 uint8_t status) 3318 { 3319 struct ifnet *ifp = sc->sc_ifp; 3320 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 3321 struct iwn_tx_data *data = &ring->data[desc->idx]; 3322 struct mbuf *m; 3323 struct ieee80211_node *ni; 3324 struct ieee80211vap *vap; 3325 3326 KASSERT(data->ni != NULL, ("no node")); 3327 3328 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3329 3330 /* Unmap and free mbuf. */ 3331 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 3332 bus_dmamap_unload(ring->data_dmat, data->map); 3333 m = data->m, data->m = NULL; 3334 ni = data->ni, data->ni = NULL; 3335 vap = ni->ni_vap; 3336 3337 /* 3338 * Update rate control statistics for the node. 3339 */ 3340 if (status & IWN_TX_FAIL) { 3341 ifp->if_oerrors++; 3342 ieee80211_ratectl_tx_complete(vap, ni, 3343 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3344 } else { 3345 ifp->if_opackets++; 3346 ieee80211_ratectl_tx_complete(vap, ni, 3347 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3348 } 3349 3350 /* 3351 * Channels marked for "radar" require traffic to be received 3352 * to unlock before we can transmit. Until traffic is seen 3353 * any attempt to transmit is returned immediately with status 3354 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 3355 * happen on first authenticate after scanning. To workaround 3356 * this we ignore a failure of this sort in AUTH state so the 3357 * 802.11 layer will fall back to using a timeout to wait for 3358 * the AUTH reply. This allows the firmware time to see 3359 * traffic so a subsequent retry of AUTH succeeds. It's 3360 * unclear why the firmware does not maintain state for 3361 * channels recently visited as this would allow immediate 3362 * use of the channel after a scan (where we see traffic). 3363 */ 3364 if (status == IWN_TX_FAIL_TX_LOCKED && 3365 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 3366 ieee80211_tx_complete(ni, m, 0); 3367 else 3368 ieee80211_tx_complete(ni, m, 3369 (status & IWN_TX_FAIL) != 0); 3370 3371 sc->sc_tx_timer = 0; 3372 if (--ring->queued < IWN_TX_RING_LOMARK) { 3373 sc->qfullmsk &= ~(1 << ring->qid); 3374 if (sc->qfullmsk == 0 && 3375 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3376 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3377 iwn_start_locked(ifp); 3378 } 3379 } 3380 3381 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3382 3383 } 3384 3385 /* 3386 * Process a "command done" firmware notification. This is where we wakeup 3387 * processes waiting for a synchronous command completion. 3388 */ 3389 static void 3390 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3391 { 3392 struct iwn_tx_ring *ring; 3393 struct iwn_tx_data *data; 3394 int cmd_queue_num; 3395 3396 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 3397 cmd_queue_num = IWN_PAN_CMD_QUEUE; 3398 else 3399 cmd_queue_num = IWN_CMD_QUEUE_NUM; 3400 3401 if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num) 3402 return; /* Not a command ack. 
*/ 3403 3404 ring = &sc->txq[cmd_queue_num]; 3405 data = &ring->data[desc->idx]; 3406 3407 /* If the command was mapped in an mbuf, free it. */ 3408 if (data->m != NULL) { 3409 bus_dmamap_sync(ring->data_dmat, data->map, 3410 BUS_DMASYNC_POSTWRITE); 3411 bus_dmamap_unload(ring->data_dmat, data->map); 3412 m_freem(data->m); 3413 data->m = NULL; 3414 } 3415 wakeup(&ring->desc[desc->idx]); 3416 } 3417 3418 static void 3419 iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes, 3420 void *stat) 3421 { 3422 struct iwn_ops *ops = &sc->ops; 3423 struct ifnet *ifp = sc->sc_ifp; 3424 struct iwn_tx_ring *ring = &sc->txq[qid]; 3425 struct iwn_tx_data *data; 3426 struct mbuf *m; 3427 struct iwn_node *wn; 3428 struct ieee80211_node *ni; 3429 struct ieee80211_tx_ampdu *tap; 3430 uint64_t bitmap; 3431 uint32_t *status = stat; 3432 uint16_t *aggstatus = stat; 3433 uint16_t ssn; 3434 uint8_t tid; 3435 int bit, i, lastidx, *res, seqno, shift, start; 3436 3437 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3438 3439 if (nframes == 1) { 3440 if ((*status & 0xff) != 1 && (*status & 0xff) != 2) { 3441 #ifdef NOT_YET 3442 printf("ieee80211_send_bar()\n"); 3443 #endif 3444 /* 3445 * If we completely fail a transmit, make sure a 3446 * notification is pushed up to the rate control 3447 * layer. 3448 */ 3449 tap = sc->qid2tap[qid]; 3450 tid = tap->txa_tid; 3451 wn = (void *)tap->txa_ni; 3452 ni = tap->txa_ni; 3453 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3454 IEEE80211_RATECTL_TX_FAILURE, &nframes, NULL); 3455 } 3456 } 3457 3458 bitmap = 0; 3459 start = idx; 3460 for (i = 0; i < nframes; i++) { 3461 if (le16toh(aggstatus[i * 2]) & 0xc) 3462 continue; 3463 3464 idx = le16toh(aggstatus[2*i + 1]) & 0xff; 3465 bit = idx - start; 3466 shift = 0; 3467 if (bit >= 64) { 3468 shift = 0x100 - idx + start; 3469 bit = 0; 3470 start = idx; 3471 } else if (bit <= -64) 3472 bit = 0x100 - start + idx; 3473 else if (bit < 0) { 3474 shift = start - idx; 3475 start = idx; 3476 bit = 0; 3477 } 3478 bitmap = bitmap << shift; 3479 bitmap |= 1ULL << bit; 3480 } 3481 tap = sc->qid2tap[qid]; 3482 tid = tap->txa_tid; 3483 wn = (void *)tap->txa_ni; 3484 wn->agg[tid].bitmap = bitmap; 3485 wn->agg[tid].startidx = start; 3486 wn->agg[tid].nframes = nframes; 3487 3488 res = NULL; 3489 ssn = 0; 3490 if (!IEEE80211_AMPDU_RUNNING(tap)) { 3491 res = tap->txa_private; 3492 ssn = tap->txa_start & 0xfff; 3493 } 3494 3495 seqno = le32toh(*(status + nframes)) & 0xfff; 3496 for (lastidx = (seqno & 0xff); ring->read != lastidx;) { 3497 data = &ring->data[ring->read]; 3498 3499 /* Unmap and free mbuf. 
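   Every frame from ring->read up to the index derived from the
   firmware-reported sequence number is covered by this aggregation
   status, so complete those frames back to net80211 and advance the
   ring's read pointer.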
*/ 3500 bus_dmamap_sync(ring->data_dmat, data->map, 3501 BUS_DMASYNC_POSTWRITE); 3502 bus_dmamap_unload(ring->data_dmat, data->map); 3503 m = data->m, data->m = NULL; 3504 ni = data->ni, data->ni = NULL; 3505 3506 KASSERT(ni != NULL, ("no node")); 3507 KASSERT(m != NULL, ("no mbuf")); 3508 3509 ieee80211_tx_complete(ni, m, 1); 3510 3511 ring->queued--; 3512 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT; 3513 } 3514 3515 if (ring->queued == 0 && res != NULL) { 3516 iwn_nic_lock(sc); 3517 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3518 iwn_nic_unlock(sc); 3519 sc->qid2tap[qid] = NULL; 3520 free(res, M_DEVBUF); 3521 return; 3522 } 3523 3524 sc->sc_tx_timer = 0; 3525 if (ring->queued < IWN_TX_RING_LOMARK) { 3526 sc->qfullmsk &= ~(1 << ring->qid); 3527 if (sc->qfullmsk == 0 && 3528 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3529 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3530 iwn_start_locked(ifp); 3531 } 3532 } 3533 3534 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3535 3536 } 3537 3538 /* 3539 * Process an INT_FH_RX or INT_SW_RX interrupt. 3540 */ 3541 static void 3542 iwn_notif_intr(struct iwn_softc *sc) 3543 { 3544 struct iwn_ops *ops = &sc->ops; 3545 struct ifnet *ifp = sc->sc_ifp; 3546 struct ieee80211com *ic = ifp->if_l2com; 3547 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3548 uint16_t hw; 3549 3550 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 3551 BUS_DMASYNC_POSTREAD); 3552 3553 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 3554 while (sc->rxq.cur != hw) { 3555 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 3556 struct iwn_rx_desc *desc; 3557 3558 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3559 BUS_DMASYNC_POSTREAD); 3560 desc = mtod(data->m, struct iwn_rx_desc *); 3561 3562 DPRINTF(sc, IWN_DEBUG_RECV, 3563 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 3564 __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags, 3565 desc->type, iwn_intr_str(desc->type), 3566 le16toh(desc->len)); 3567 3568 if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */ 3569 iwn_cmd_done(sc, desc); 3570 3571 switch (desc->type) { 3572 case IWN_RX_PHY: 3573 iwn_rx_phy(sc, desc, data); 3574 break; 3575 3576 case IWN_RX_DONE: /* 4965AGN only. */ 3577 case IWN_MPDU_RX_DONE: 3578 /* An 802.11 frame has been received. */ 3579 iwn_rx_done(sc, desc, data); 3580 break; 3581 3582 case IWN_RX_COMPRESSED_BA: 3583 /* A Compressed BlockAck has been received. */ 3584 iwn_rx_compressed_ba(sc, desc, data); 3585 break; 3586 3587 case IWN_TX_DONE: 3588 /* An 802.11 frame has been transmitted. */ 3589 ops->tx_done(sc, desc, data); 3590 break; 3591 3592 case IWN_RX_STATISTICS: 3593 case IWN_BEACON_STATISTICS: 3594 iwn_rx_statistics(sc, desc, data); 3595 break; 3596 3597 case IWN_BEACON_MISSED: 3598 { 3599 struct iwn_beacon_missed *miss = 3600 (struct iwn_beacon_missed *)(desc + 1); 3601 int misses; 3602 3603 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3604 BUS_DMASYNC_POSTREAD); 3605 misses = le32toh(miss->consecutive); 3606 3607 DPRINTF(sc, IWN_DEBUG_STATE, 3608 "%s: beacons missed %d/%d\n", __func__, 3609 misses, le32toh(miss->total)); 3610 /* 3611 * If more than 5 consecutive beacons are missed, 3612 * reinitialize the sensitivity state machine. 
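 * Independently, once the count reaches the vap's iv_bmissthreshold,
 * notify net80211 so its own beacon-miss handling can run; the driver
 * lock is dropped around that call.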
3613 */ 3614 if (vap->iv_state == IEEE80211_S_RUN && 3615 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 3616 if (misses > 5) 3617 (void)iwn_init_sensitivity(sc); 3618 if (misses >= vap->iv_bmissthreshold) { 3619 IWN_UNLOCK(sc); 3620 ieee80211_beacon_miss(ic); 3621 IWN_LOCK(sc); 3622 } 3623 } 3624 break; 3625 } 3626 case IWN_UC_READY: 3627 { 3628 struct iwn_ucode_info *uc = 3629 (struct iwn_ucode_info *)(desc + 1); 3630 3631 /* The microcontroller is ready. */ 3632 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3633 BUS_DMASYNC_POSTREAD); 3634 DPRINTF(sc, IWN_DEBUG_RESET, 3635 "microcode alive notification version=%d.%d " 3636 "subtype=%x alive=%x\n", uc->major, uc->minor, 3637 uc->subtype, le32toh(uc->valid)); 3638 3639 if (le32toh(uc->valid) != 1) { 3640 device_printf(sc->sc_dev, 3641 "microcontroller initialization failed\n"); 3642 break; 3643 } 3644 if (uc->subtype == IWN_UCODE_INIT) { 3645 /* Save microcontroller report. */ 3646 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 3647 } 3648 /* Save the address of the error log in SRAM. */ 3649 sc->errptr = le32toh(uc->errptr); 3650 break; 3651 } 3652 case IWN_STATE_CHANGED: 3653 { 3654 /* 3655 * State change allows hardware switch changes to be 3656 * noted. However, we handle this in iwn_intr as we 3657 * get both the enable and disable interrupts. 3658 */ 3659 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3660 BUS_DMASYNC_POSTREAD); 3661 #ifdef IWN_DEBUG 3662 uint32_t *status = (uint32_t *)(desc + 1); 3663 DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE, 3664 "state changed to %x\n", 3665 le32toh(*status)); 3666 #endif 3667 break; 3668 } 3669 case IWN_START_SCAN: 3670 { 3671 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3672 BUS_DMASYNC_POSTREAD); 3673 #ifdef IWN_DEBUG 3674 struct iwn_start_scan *scan = 3675 (struct iwn_start_scan *)(desc + 1); 3676 DPRINTF(sc, IWN_DEBUG_ANY, 3677 "%s: scanning channel %d status %x\n", 3678 __func__, scan->chan, le32toh(scan->status)); 3679 #endif 3680 break; 3681 } 3682 case IWN_STOP_SCAN: 3683 { 3684 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3685 BUS_DMASYNC_POSTREAD); 3686 #ifdef IWN_DEBUG 3687 struct iwn_stop_scan *scan = 3688 (struct iwn_stop_scan *)(desc + 1); 3689 DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN, 3690 "scan finished nchan=%d status=%d chan=%d\n", 3691 scan->nchan, scan->status, scan->chan); 3692 #endif 3693 sc->sc_is_scanning = 0; 3694 IWN_UNLOCK(sc); 3695 ieee80211_scan_next(vap); 3696 IWN_LOCK(sc); 3697 break; 3698 } 3699 case IWN5000_CALIBRATION_RESULT: 3700 iwn5000_rx_calib_results(sc, desc, data); 3701 break; 3702 3703 case IWN5000_CALIBRATION_DONE: 3704 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 3705 wakeup(sc); 3706 break; 3707 } 3708 3709 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 3710 } 3711 3712 /* Tell the firmware what we have processed. */ 3713 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 3714 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 3715 } 3716 3717 /* 3718 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 3719 * from power-down sleep mode. 3720 */ 3721 static void 3722 iwn_wakeup_intr(struct iwn_softc *sc) 3723 { 3724 int qid; 3725 3726 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 3727 __func__); 3728 3729 /* Wakeup RX and TX rings.
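   Re-post the ring write pointers after the wakeup: the RX write pointer
   is kept 8-aligned (hence the "& ~7") and each TX ring's current index
   is written to IWN_HBUS_TARG_WRPTR with the queue id in bits 8 and
   above.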
*/ 3730 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 3731 for (qid = 0; qid < sc->ntxqs; qid++) { 3732 struct iwn_tx_ring *ring = &sc->txq[qid]; 3733 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 3734 } 3735 } 3736 3737 static void 3738 iwn_rftoggle_intr(struct iwn_softc *sc) 3739 { 3740 struct ifnet *ifp = sc->sc_ifp; 3741 struct ieee80211com *ic = ifp->if_l2com; 3742 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 3743 3744 IWN_LOCK_ASSERT(sc); 3745 3746 device_printf(sc->sc_dev, "RF switch: radio %s\n", 3747 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 3748 if (tmp & IWN_GP_CNTRL_RFKILL) 3749 ieee80211_runtask(ic, &sc->sc_radioon_task); 3750 else 3751 ieee80211_runtask(ic, &sc->sc_radiooff_task); 3752 } 3753 3754 /* 3755 * Dump the error log of the firmware when a firmware panic occurs. Although 3756 * we can't debug the firmware because it is neither open source nor free, it 3757 * can help us to identify certain classes of problems. 3758 */ 3759 static void 3760 iwn_fatal_intr(struct iwn_softc *sc) 3761 { 3762 struct iwn_fw_dump dump; 3763 int i; 3764 3765 IWN_LOCK_ASSERT(sc); 3766 3767 /* Force a complete recalibration on next init. */ 3768 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 3769 3770 /* Check that the error log address is valid. */ 3771 if (sc->errptr < IWN_FW_DATA_BASE || 3772 sc->errptr + sizeof (dump) > 3773 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 3774 printf("%s: bad firmware error log address 0x%08x\n", __func__, 3775 sc->errptr); 3776 return; 3777 } 3778 if (iwn_nic_lock(sc) != 0) { 3779 printf("%s: could not read firmware error log\n", __func__); 3780 return; 3781 } 3782 /* Read firmware error log from SRAM. */ 3783 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 3784 sizeof (dump) / sizeof (uint32_t)); 3785 iwn_nic_unlock(sc); 3786 3787 if (dump.valid == 0) { 3788 printf("%s: firmware error log is empty\n", __func__); 3789 return; 3790 } 3791 printf("firmware error log:\n"); 3792 printf(" error type = \"%s\" (0x%08X)\n", 3793 (dump.id < nitems(iwn_fw_errmsg)) ? 3794 iwn_fw_errmsg[dump.id] : "UNKNOWN", 3795 dump.id); 3796 printf(" program counter = 0x%08X\n", dump.pc); 3797 printf(" source line = 0x%08X\n", dump.src_line); 3798 printf(" error data = 0x%08X%08X\n", 3799 dump.error_data[0], dump.error_data[1]); 3800 printf(" branch link = 0x%08X%08X\n", 3801 dump.branch_link[0], dump.branch_link[1]); 3802 printf(" interrupt link = 0x%08X%08X\n", 3803 dump.interrupt_link[0], dump.interrupt_link[1]); 3804 printf(" time = %u\n", dump.time[0]); 3805 3806 /* Dump driver status (TX and RX rings) while we're here. */ 3807 printf("driver status:\n"); 3808 for (i = 0; i < sc->ntxqs; i++) { 3809 struct iwn_tx_ring *ring = &sc->txq[i]; 3810 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 3811 i, ring->qid, ring->cur, ring->queued); 3812 } 3813 printf(" rx ring: cur=%d\n", sc->rxq.cur); 3814 } 3815 3816 static void 3817 iwn_intr(void *arg) 3818 { 3819 struct iwn_softc *sc = arg; 3820 struct ifnet *ifp = sc->sc_ifp; 3821 uint32_t r1, r2, tmp; 3822 3823 IWN_LOCK(sc); 3824 3825 /* Disable interrupts. */ 3826 IWN_WRITE(sc, IWN_INT_MASK, 0); 3827 3828 /* Read interrupts from ICT (fast) or from registers (slow). */ 3829 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3830 tmp = 0; 3831 while (sc->ict[sc->ict_cur] != 0) { 3832 tmp |= sc->ict[sc->ict_cur]; 3833 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 3834 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 3835 } 3836 tmp = le32toh(tmp); 3837 if (tmp == 0xffffffff) /* Shouldn't happen. 
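An all-ones read here would presumably mean the device has gone away, much as
the register path below treats 0xffffffff; zeroing it keeps the expansion into
r1 a few lines down ((tmp & 0xff00) << 16 | (tmp & 0xff), e.g. a hypothetical
0x8001 expands to 0x80000001) from reporting spurious interrupt causes.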
*/ 3838 tmp = 0; 3839 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 3840 tmp |= 0x8000; 3841 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3842 r2 = 0; /* Unused. */ 3843 } else { 3844 r1 = IWN_READ(sc, IWN_INT); 3845 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3846 return; /* Hardware gone! */ 3847 r2 = IWN_READ(sc, IWN_FH_INT); 3848 } 3849 3850 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" 3851 , r1, r2); 3852 3853 if (r1 == 0 && r2 == 0) 3854 goto done; /* Interrupt not for us. */ 3855 3856 /* Acknowledge interrupts. */ 3857 IWN_WRITE(sc, IWN_INT, r1); 3858 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3859 IWN_WRITE(sc, IWN_FH_INT, r2); 3860 3861 if (r1 & IWN_INT_RF_TOGGLED) { 3862 iwn_rftoggle_intr(sc); 3863 goto done; 3864 } 3865 if (r1 & IWN_INT_CT_REACHED) { 3866 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 3867 __func__); 3868 } 3869 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3870 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 3871 __func__); 3872 #ifdef IWN_DEBUG 3873 iwn_debug_register(sc); 3874 #endif 3875 /* Dump firmware error log and stop. */ 3876 iwn_fatal_intr(sc); 3877 ifp->if_flags &= ~IFF_UP; 3878 iwn_stop_locked(sc); 3879 goto done; 3880 } 3881 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3882 (r2 & IWN_FH_INT_RX)) { 3883 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3884 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3885 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3886 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3887 IWN_INT_PERIODIC_DIS); 3888 iwn_notif_intr(sc); 3889 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 3890 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3891 IWN_INT_PERIODIC_ENA); 3892 } 3893 } else 3894 iwn_notif_intr(sc); 3895 } 3896 3897 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 3898 if (sc->sc_flags & IWN_FLAG_USE_ICT) 3899 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 3900 wakeup(sc); /* FH DMA transfer completed. */ 3901 } 3902 3903 if (r1 & IWN_INT_ALIVE) 3904 wakeup(sc); /* Firmware is alive. */ 3905 3906 if (r1 & IWN_INT_WAKEUP) 3907 iwn_wakeup_intr(sc); 3908 3909 done: 3910 /* Re-enable interrupts. */ 3911 if (ifp->if_flags & IFF_UP) 3912 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 3913 3914 IWN_UNLOCK(sc); 3915 } 3916 3917 /* 3918 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 3919 * 5000 adapters use a slightly different format). 
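 * Each scheduler byte-count entry stores the length handed in plus a fixed
 * 8 bytes; the 5000 series additionally packs the station id into the top
 * nibble of the same 16-bit word.  A minimal sketch of the two encodings
 * (hypothetical helper and values, not compiled into the driver):
 */
#if 0
static uint16_t
iwn_sched_word_sketch(int is_5000, uint8_t id, uint16_t len)
{
	/* e.g. len = 100 gives 0x006c on 4965 and, with id = 2, 0x206c. */
	if (is_5000)
		return (htole16(id << 12 | (len + 8)));
	return (htole16(len + 8));
}
#endif
/*
 * Both helpers below also duplicate the entry IWN_TX_RING_COUNT slots
 * further on when the index falls within IWN_SCHED_WINSZ.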
3920 */ 3921 static void 3922 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3923 uint16_t len) 3924 { 3925 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3926 3927 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3928 3929 *w = htole16(len + 8); 3930 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3931 BUS_DMASYNC_PREWRITE); 3932 if (idx < IWN_SCHED_WINSZ) { 3933 *(w + IWN_TX_RING_COUNT) = *w; 3934 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3935 BUS_DMASYNC_PREWRITE); 3936 } 3937 } 3938 3939 static void 3940 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3941 uint16_t len) 3942 { 3943 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3944 3945 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3946 3947 *w = htole16(id << 12 | (len + 8)); 3948 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3949 BUS_DMASYNC_PREWRITE); 3950 if (idx < IWN_SCHED_WINSZ) { 3951 *(w + IWN_TX_RING_COUNT) = *w; 3952 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3953 BUS_DMASYNC_PREWRITE); 3954 } 3955 } 3956 3957 #ifdef notyet 3958 static void 3959 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3960 { 3961 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3962 3963 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3964 3965 *w = (*w & htole16(0xf000)) | htole16(1); 3966 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3967 BUS_DMASYNC_PREWRITE); 3968 if (idx < IWN_SCHED_WINSZ) { 3969 *(w + IWN_TX_RING_COUNT) = *w; 3970 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3971 BUS_DMASYNC_PREWRITE); 3972 } 3973 } 3974 #endif 3975 3976 /* 3977 * Check whether OFDM 11g protection will be enabled for the given rate. 3978 * 3979 * The original driver code only enabled protection for OFDM rates. 3980 * It didn't check to see whether it was operating in 11a or 11bg mode. 3981 */ 3982 static int 3983 iwn_check_rate_needs_protection(struct iwn_softc *sc, 3984 struct ieee80211vap *vap, uint8_t rate) 3985 { 3986 struct ieee80211com *ic = vap->iv_ic; 3987 3988 /* 3989 * Not in 2GHz mode? Then there's no need to enable OFDM 3990 * 11bg protection. 3991 */ 3992 if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { 3993 return (0); 3994 } 3995 3996 /* 3997 * 11bg protection not enabled? Then don't use it. 3998 */ 3999 if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0) 4000 return (0); 4001 4002 /* 4003 * If it's an 11n rate, then for now we enable 4004 * protection. 4005 */ 4006 if (rate & IEEE80211_RATE_MCS) { 4007 return (1); 4008 } 4009 4010 /* 4011 * Do a rate table lookup. If the PHY is CCK, 4012 * don't do protection. 4013 */ 4014 if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK) 4015 return (0); 4016 4017 /* 4018 * Yup, enable protection. 4019 */ 4020 return (1); 4021 } 4022 4023 /* 4024 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into 4025 * the link quality table that reflects this particular entry. 4026 */ 4027 static int 4028 iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni, 4029 uint8_t rate) 4030 { 4031 struct ieee80211_rateset *rs; 4032 int is_11n; 4033 int nr; 4034 int i; 4035 uint8_t cmp_rate; 4036 4037 /* 4038 * Figure out if we're using 11n or not here. 4039 */ 4040 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) 4041 is_11n = 1; 4042 else 4043 is_11n = 0; 4044 4045 /* 4046 * Use the correct rate table. 
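	 * The loop below then walks the chosen table from the highest rate
	 * down, so with a plain 11b rate set of 1/2/5.5/11 Mb/s a request
	 * for 11 Mb/s maps to link-quality offset 0 and 1 Mb/s to offset 3;
	 * a rate that is not found falls back to the last slot
	 * (IWN_MAX_TX_RETRIES - 1).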
4047 */ 4048 if (is_11n) { 4049 rs = (struct ieee80211_rateset *) &ni->ni_htrates; 4050 nr = ni->ni_htrates.rs_nrates; 4051 } else { 4052 rs = &ni->ni_rates; 4053 nr = rs->rs_nrates; 4054 } 4055 4056 /* 4057 * Find the relevant link quality entry in the table. 4058 */ 4059 for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) { 4060 /* 4061 * The link quality table index starts at 0 == highest 4062 * rate, so we walk the rate table backwards. 4063 */ 4064 cmp_rate = rs->rs_rates[(nr - 1) - i]; 4065 if (rate & IEEE80211_RATE_MCS) 4066 cmp_rate |= IEEE80211_RATE_MCS; 4067 4068 #if 0 4069 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n", 4070 __func__, 4071 i, 4072 nr, 4073 rate, 4074 cmp_rate); 4075 #endif 4076 4077 if (cmp_rate == rate) 4078 return (i); 4079 } 4080 4081 /* Failed? Start at the end */ 4082 return (IWN_MAX_TX_RETRIES - 1); 4083 } 4084 4085 static int 4086 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 4087 { 4088 struct iwn_ops *ops = &sc->ops; 4089 const struct ieee80211_txparam *tp; 4090 struct ieee80211vap *vap = ni->ni_vap; 4091 struct ieee80211com *ic = ni->ni_ic; 4092 struct iwn_node *wn = (void *)ni; 4093 struct iwn_tx_ring *ring; 4094 struct iwn_tx_desc *desc; 4095 struct iwn_tx_data *data; 4096 struct iwn_tx_cmd *cmd; 4097 struct iwn_cmd_data *tx; 4098 struct ieee80211_frame *wh; 4099 struct ieee80211_key *k = NULL; 4100 struct mbuf *m1; 4101 uint32_t flags; 4102 uint16_t qos; 4103 u_int hdrlen; 4104 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 4105 uint8_t tid, type; 4106 int ac, i, totlen, error, pad, nsegs = 0, rate; 4107 4108 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4109 4110 IWN_LOCK_ASSERT(sc); 4111 4112 wh = mtod(m, struct ieee80211_frame *); 4113 hdrlen = ieee80211_anyhdrsize(wh); 4114 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4115 4116 /* Select EDCA Access Category and TX ring for this frame. */ 4117 if (IEEE80211_QOS_HAS_SEQ(wh)) { 4118 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 4119 tid = qos & IEEE80211_QOS_TID; 4120 } else { 4121 qos = 0; 4122 tid = 0; 4123 } 4124 ac = M_WME_GETAC(m); 4125 if (m->m_flags & M_AMPDU_MPDU) { 4126 uint16_t seqno; 4127 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac]; 4128 4129 if (!IEEE80211_AMPDU_RUNNING(tap)) { 4130 m_freem(m); 4131 return EINVAL; 4132 } 4133 4134 /* 4135 * Queue this frame to the hardware ring that we've 4136 * negotiated AMPDU TX on. 4137 * 4138 * Note that the sequence number must match the TX slot 4139 * being used! 4140 */ 4141 ac = *(int *)tap->txa_private; 4142 seqno = ni->ni_txseqs[tid]; 4143 *(uint16_t *)wh->i_seq = 4144 htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 4145 ring = &sc->txq[ac]; 4146 if ((seqno % 256) != ring->cur) { 4147 device_printf(sc->sc_dev, 4148 "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n", 4149 __func__, 4150 m, 4151 seqno, 4152 seqno % 256, 4153 ring->cur); 4154 } 4155 ni->ni_txseqs[tid]++; 4156 } 4157 ring = &sc->txq[ac]; 4158 desc = &ring->desc[ring->cur]; 4159 data = &ring->data[ring->cur]; 4160 4161 /* Choose a TX rate index. 
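Management (and EAPOL) frames use the configured management rate, multicast
frames the multicast rate, a fixed unicast rate is honoured when one is
configured, and anything else is left to the rate-control module via
ieee80211_ratectl_rate().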
*/ 4162 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 4163 if (type == IEEE80211_FC0_TYPE_MGT) 4164 rate = tp->mgmtrate; 4165 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 4166 rate = tp->mcastrate; 4167 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 4168 rate = tp->ucastrate; 4169 else if (m->m_flags & M_EAPOL) 4170 rate = tp->mgmtrate; 4171 else { 4172 /* XXX pass pktlen */ 4173 (void) ieee80211_ratectl_rate(ni, NULL, 0); 4174 rate = ni->ni_txrate; 4175 } 4176 4177 /* Encrypt the frame if need be. */ 4178 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 4179 /* Retrieve key for TX. */ 4180 k = ieee80211_crypto_encap(ni, m); 4181 if (k == NULL) { 4182 m_freem(m); 4183 return ENOBUFS; 4184 } 4185 /* 802.11 header may have moved. */ 4186 wh = mtod(m, struct ieee80211_frame *); 4187 } 4188 totlen = m->m_pkthdr.len; 4189 4190 if (ieee80211_radiotap_active_vap(vap)) { 4191 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4192 4193 tap->wt_flags = 0; 4194 tap->wt_rate = rate; 4195 if (k != NULL) 4196 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 4197 4198 ieee80211_radiotap_tx(vap, m); 4199 } 4200 4201 /* Prepare TX firmware command. */ 4202 cmd = &ring->cmd[ring->cur]; 4203 cmd->code = IWN_CMD_TX_DATA; 4204 cmd->flags = 0; 4205 cmd->qid = ring->qid; 4206 cmd->idx = ring->cur; 4207 4208 tx = (struct iwn_cmd_data *)cmd->data; 4209 /* NB: No need to clear tx, all fields are reinitialized here. */ 4210 tx->scratch = 0; /* clear "scratch" area */ 4211 4212 flags = 0; 4213 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4214 /* Unicast frame, check if an ACK is expected. */ 4215 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 4216 IEEE80211_QOS_ACKPOLICY_NOACK) 4217 flags |= IWN_TX_NEED_ACK; 4218 } 4219 if ((wh->i_fc[0] & 4220 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 4221 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 4222 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 4223 4224 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 4225 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 4226 4227 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 4228 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4229 /* NB: Group frames are sent using CCK in 802.11b/g. */ 4230 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 4231 flags |= IWN_TX_NEED_RTS; 4232 } else if (iwn_check_rate_needs_protection(sc, vap, rate)) { 4233 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 4234 flags |= IWN_TX_NEED_CTS; 4235 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 4236 flags |= IWN_TX_NEED_RTS; 4237 } 4238 4239 /* XXX HT protection? */ 4240 4241 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 4242 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4243 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4244 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 4245 flags |= IWN_TX_NEED_PROTECTION; 4246 } else 4247 flags |= IWN_TX_FULL_TXOP; 4248 } 4249 } 4250 4251 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 4252 type != IEEE80211_FC0_TYPE_DATA) 4253 tx->id = sc->broadcast_id; 4254 else 4255 tx->id = wn->id; 4256 4257 if (type == IEEE80211_FC0_TYPE_MGT) { 4258 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4259 4260 /* Tell HW to set timestamp in probe responses. 
*/ 4261 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4262 flags |= IWN_TX_INSERT_TSTAMP; 4263 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4264 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4265 tx->timeout = htole16(3); 4266 else 4267 tx->timeout = htole16(2); 4268 } else 4269 tx->timeout = htole16(0); 4270 4271 if (hdrlen & 3) { 4272 /* First segment length must be a multiple of 4. */ 4273 flags |= IWN_TX_NEED_PADDING; 4274 pad = 4 - (hdrlen & 3); 4275 } else 4276 pad = 0; 4277 4278 tx->len = htole16(totlen); 4279 tx->tid = tid; 4280 tx->rts_ntries = 60; 4281 tx->data_ntries = 15; 4282 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4283 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4284 if (tx->id == sc->broadcast_id) { 4285 /* Group or management frame. */ 4286 tx->linkq = 0; 4287 } else { 4288 tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate); 4289 flags |= IWN_TX_LINKQ; /* enable MRR */ 4290 } 4291 4292 /* Set physical address of "scratch area". */ 4293 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4294 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4295 4296 /* Copy 802.11 header in TX command. */ 4297 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4298 4299 /* Trim 802.11 header. */ 4300 m_adj(m, hdrlen); 4301 tx->security = 0; 4302 tx->flags = htole32(flags); 4303 4304 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 4305 &nsegs, BUS_DMA_NOWAIT); 4306 if (error != 0) { 4307 if (error != EFBIG) { 4308 device_printf(sc->sc_dev, 4309 "%s: can't map mbuf (error %d)\n", __func__, error); 4310 m_freem(m); 4311 return error; 4312 } 4313 /* Too many DMA segments, linearize mbuf. */ 4314 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER); 4315 if (m1 == NULL) { 4316 device_printf(sc->sc_dev, 4317 "%s: could not defrag mbuf\n", __func__); 4318 m_freem(m); 4319 return ENOBUFS; 4320 } 4321 m = m1; 4322 4323 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 4324 segs, &nsegs, BUS_DMA_NOWAIT); 4325 if (error != 0) { 4326 device_printf(sc->sc_dev, 4327 "%s: can't map mbuf (error %d)\n", __func__, error); 4328 m_freem(m); 4329 return error; 4330 } 4331 } 4332 4333 data->m = m; 4334 data->ni = ni; 4335 4336 DPRINTF(sc, IWN_DEBUG_XMIT, 4337 "%s: qid %d idx %d len %d nsegs %d rate %04x plcp 0x%08x\n", 4338 __func__, 4339 ring->qid, 4340 ring->cur, 4341 m->m_pkthdr.len, 4342 nsegs, 4343 rate, 4344 tx->rate); 4345 4346 /* Fill TX descriptor. */ 4347 desc->nsegs = 1; 4348 if (m->m_len != 0) 4349 desc->nsegs += nsegs; 4350 /* First DMA segment is used by the TX command. */ 4351 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4352 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4353 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4354 /* Other DMA segments are for data payload. */ 4355 seg = &segs[0]; 4356 for (i = 1; i <= nsegs; i++) { 4357 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4358 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4359 seg->ds_len << 4); 4360 seg++; 4361 } 4362 4363 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4364 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4365 BUS_DMASYNC_PREWRITE); 4366 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4367 BUS_DMASYNC_PREWRITE); 4368 4369 /* Update TX scheduler. */ 4370 if (ring->qid >= sc->firstaggqueue) 4371 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4372 4373 /* Kick TX ring. 
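The write pointer register packs the queue id into bits 8-15 and the new ring
index into the low byte, e.g. qid 3 with cur 200 is written as 0x03c8.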
*/ 4374 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4375 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4376 4377 /* Mark TX ring as full if we reach a certain threshold. */ 4378 if (++ring->queued > IWN_TX_RING_HIMARK) 4379 sc->qfullmsk |= 1 << ring->qid; 4380 4381 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4382 4383 return 0; 4384 } 4385 4386 static int 4387 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 4388 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 4389 { 4390 struct iwn_ops *ops = &sc->ops; 4391 // struct ifnet *ifp = sc->sc_ifp; 4392 struct ieee80211vap *vap = ni->ni_vap; 4393 // struct ieee80211com *ic = ifp->if_l2com; 4394 struct iwn_tx_cmd *cmd; 4395 struct iwn_cmd_data *tx; 4396 struct ieee80211_frame *wh; 4397 struct iwn_tx_ring *ring; 4398 struct iwn_tx_desc *desc; 4399 struct iwn_tx_data *data; 4400 struct mbuf *m1; 4401 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 4402 uint32_t flags; 4403 u_int hdrlen; 4404 int ac, totlen, error, pad, nsegs = 0, i, rate; 4405 uint8_t type; 4406 4407 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4408 4409 IWN_LOCK_ASSERT(sc); 4410 4411 wh = mtod(m, struct ieee80211_frame *); 4412 hdrlen = ieee80211_anyhdrsize(wh); 4413 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4414 4415 ac = params->ibp_pri & 3; 4416 4417 ring = &sc->txq[ac]; 4418 desc = &ring->desc[ring->cur]; 4419 data = &ring->data[ring->cur]; 4420 4421 /* Choose a TX rate. */ 4422 rate = params->ibp_rate0; 4423 totlen = m->m_pkthdr.len; 4424 4425 /* Prepare TX firmware command. */ 4426 cmd = &ring->cmd[ring->cur]; 4427 cmd->code = IWN_CMD_TX_DATA; 4428 cmd->flags = 0; 4429 cmd->qid = ring->qid; 4430 cmd->idx = ring->cur; 4431 4432 tx = (struct iwn_cmd_data *)cmd->data; 4433 /* NB: No need to clear tx, all fields are reinitialized here. */ 4434 tx->scratch = 0; /* clear "scratch" area */ 4435 4436 flags = 0; 4437 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 4438 flags |= IWN_TX_NEED_ACK; 4439 if (params->ibp_flags & IEEE80211_BPF_RTS) { 4440 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4441 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4442 flags &= ~IWN_TX_NEED_RTS; 4443 flags |= IWN_TX_NEED_PROTECTION; 4444 } else 4445 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 4446 } 4447 if (params->ibp_flags & IEEE80211_BPF_CTS) { 4448 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4449 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4450 flags &= ~IWN_TX_NEED_CTS; 4451 flags |= IWN_TX_NEED_PROTECTION; 4452 } else 4453 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 4454 } 4455 if (type == IEEE80211_FC0_TYPE_MGT) { 4456 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4457 4458 /* Tell HW to set timestamp in probe responses. */ 4459 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4460 flags |= IWN_TX_INSERT_TSTAMP; 4461 4462 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4463 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4464 tx->timeout = htole16(3); 4465 else 4466 tx->timeout = htole16(2); 4467 } else 4468 tx->timeout = htole16(0); 4469 4470 if (hdrlen & 3) { 4471 /* First segment length must be a multiple of 4. 
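For example a 26-byte QoS data header leaves pad = 4 - (26 & 3) = 2, while a
plain 24-byte header needs no padding at all.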
*/ 4472 flags |= IWN_TX_NEED_PADDING; 4473 pad = 4 - (hdrlen & 3); 4474 } else 4475 pad = 0; 4476 4477 if (ieee80211_radiotap_active_vap(vap)) { 4478 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4479 4480 tap->wt_flags = 0; 4481 tap->wt_rate = rate; 4482 4483 ieee80211_radiotap_tx(vap, m); 4484 } 4485 4486 tx->len = htole16(totlen); 4487 tx->tid = 0; 4488 tx->id = sc->broadcast_id; 4489 tx->rts_ntries = params->ibp_try1; 4490 tx->data_ntries = params->ibp_try0; 4491 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4492 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4493 4494 /* Group or management frame. */ 4495 tx->linkq = 0; 4496 4497 /* Set physical address of "scratch area". */ 4498 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4499 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4500 4501 /* Copy 802.11 header in TX command. */ 4502 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4503 4504 /* Trim 802.11 header. */ 4505 m_adj(m, hdrlen); 4506 tx->security = 0; 4507 tx->flags = htole32(flags); 4508 4509 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 4510 &nsegs, BUS_DMA_NOWAIT); 4511 if (error != 0) { 4512 if (error != EFBIG) { 4513 device_printf(sc->sc_dev, 4514 "%s: can't map mbuf (error %d)\n", __func__, error); 4515 m_freem(m); 4516 return error; 4517 } 4518 /* Too many DMA segments, linearize mbuf. */ 4519 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER); 4520 if (m1 == NULL) { 4521 device_printf(sc->sc_dev, 4522 "%s: could not defrag mbuf\n", __func__); 4523 m_freem(m); 4524 return ENOBUFS; 4525 } 4526 m = m1; 4527 4528 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 4529 segs, &nsegs, BUS_DMA_NOWAIT); 4530 if (error != 0) { 4531 device_printf(sc->sc_dev, 4532 "%s: can't map mbuf (error %d)\n", __func__, error); 4533 m_freem(m); 4534 return error; 4535 } 4536 } 4537 4538 data->m = m; 4539 data->ni = ni; 4540 4541 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 4542 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 4543 4544 /* Fill TX descriptor. */ 4545 desc->nsegs = 1; 4546 if (m->m_len != 0) 4547 desc->nsegs += nsegs; 4548 /* First DMA segment is used by the TX command. */ 4549 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4550 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4551 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4552 /* Other DMA segments are for data payload. */ 4553 seg = &segs[0]; 4554 for (i = 1; i <= nsegs; i++) { 4555 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4556 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4557 seg->ds_len << 4); 4558 seg++; 4559 } 4560 4561 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4562 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4563 BUS_DMASYNC_PREWRITE); 4564 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4565 BUS_DMASYNC_PREWRITE); 4566 4567 /* Update TX scheduler. */ 4568 if (ring->qid >= sc->firstaggqueue) 4569 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4570 4571 /* Kick TX ring. */ 4572 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4573 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4574 4575 /* Mark TX ring as full if we reach a certain threshold. 
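Once queued climbs past IWN_TX_RING_HIMARK the queue's bit is set in qfullmsk;
the TX completion path only clears it again after the ring has drained below
IWN_TX_RING_LOMARK, giving some hysteresis before transmission resumes.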
*/ 4576 if (++ring->queued > IWN_TX_RING_HIMARK) 4577 sc->qfullmsk |= 1 << ring->qid; 4578 4579 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4580 4581 return 0; 4582 } 4583 4584 static int 4585 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 4586 const struct ieee80211_bpf_params *params) 4587 { 4588 struct ieee80211com *ic = ni->ni_ic; 4589 struct ifnet *ifp = ic->ic_ifp; 4590 struct iwn_softc *sc = ifp->if_softc; 4591 int error = 0; 4592 4593 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4594 4595 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 4596 ieee80211_free_node(ni); 4597 m_freem(m); 4598 return ENETDOWN; 4599 } 4600 4601 IWN_LOCK(sc); 4602 if (params == NULL) { 4603 /* 4604 * Legacy path; interpret frame contents to decide 4605 * precisely how to send the frame. 4606 */ 4607 error = iwn_tx_data(sc, m, ni); 4608 } else { 4609 /* 4610 * Caller supplied explicit parameters to use in 4611 * sending the frame. 4612 */ 4613 error = iwn_tx_data_raw(sc, m, ni, params); 4614 } 4615 if (error != 0) { 4616 /* NB: m is reclaimed on tx failure */ 4617 ieee80211_free_node(ni); 4618 ifp->if_oerrors++; 4619 } 4620 sc->sc_tx_timer = 5; 4621 4622 IWN_UNLOCK(sc); 4623 4624 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4625 4626 return error; 4627 } 4628 4629 static void 4630 iwn_start(struct ifnet *ifp) 4631 { 4632 struct iwn_softc *sc = ifp->if_softc; 4633 4634 IWN_LOCK(sc); 4635 iwn_start_locked(ifp); 4636 IWN_UNLOCK(sc); 4637 } 4638 4639 static void 4640 iwn_start_locked(struct ifnet *ifp) 4641 { 4642 struct iwn_softc *sc = ifp->if_softc; 4643 struct ieee80211_node *ni; 4644 struct mbuf *m; 4645 4646 IWN_LOCK_ASSERT(sc); 4647 4648 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 4649 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) 4650 return; 4651 4652 for (;;) { 4653 if (sc->qfullmsk != 0) { 4654 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 4655 break; 4656 } 4657 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 4658 if (m == NULL) 4659 break; 4660 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4661 if (iwn_tx_data(sc, m, ni) != 0) { 4662 ieee80211_free_node(ni); 4663 ifp->if_oerrors++; 4664 continue; 4665 } 4666 sc->sc_tx_timer = 5; 4667 } 4668 } 4669 4670 static void 4671 iwn_watchdog(void *arg) 4672 { 4673 struct iwn_softc *sc = arg; 4674 struct ifnet *ifp = sc->sc_ifp; 4675 struct ieee80211com *ic = ifp->if_l2com; 4676 4677 IWN_LOCK_ASSERT(sc); 4678 4679 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); 4680 4681 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4682 4683 if (sc->sc_tx_timer > 0) { 4684 if (--sc->sc_tx_timer == 0) { 4685 if_printf(ifp, "device timeout\n"); 4686 ieee80211_runtask(ic, &sc->sc_reinit_task); 4687 return; 4688 } 4689 } 4690 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 4691 } 4692 4693 static int 4694 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 4695 { 4696 struct iwn_softc *sc = ifp->if_softc; 4697 struct ieee80211com *ic = ifp->if_l2com; 4698 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4699 struct ifreq *ifr = (struct ifreq *) data; 4700 int error = 0, startall = 0, stop = 0; 4701 4702 switch (cmd) { 4703 case SIOCGIFADDR: 4704 error = ether_ioctl(ifp, cmd, data); 4705 break; 4706 case SIOCSIFFLAGS: 4707 IWN_LOCK(sc); 4708 if (ifp->if_flags & IFF_UP) { 4709 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4710 iwn_init_locked(sc); 4711 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 4712 startall = 1; 4713 else 4714 stop = 1; 4715 } 4716 } else { 4717 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4718 
iwn_stop_locked(sc); 4719 } 4720 IWN_UNLOCK(sc); 4721 if (startall) 4722 ieee80211_start_all(ic); 4723 else if (vap != NULL && stop) 4724 ieee80211_stop(vap); 4725 break; 4726 case SIOCGIFMEDIA: 4727 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 4728 break; 4729 case SIOCGIWNSTATS: 4730 IWN_LOCK(sc); 4731 /* XXX validate permissions/memory/etc? */ 4732 error = copyout(&sc->last_stat, ifr->ifr_data, 4733 sizeof(struct iwn_stats)); 4734 IWN_UNLOCK(sc); 4735 break; 4736 case SIOCZIWNSTATS: 4737 IWN_LOCK(sc); 4738 memset(&sc->last_stat, 0, sizeof(struct iwn_stats)); 4739 IWN_UNLOCK(sc); 4740 error = 0; 4741 break; 4742 default: 4743 error = EINVAL; 4744 break; 4745 } 4746 return error; 4747 } 4748 4749 /* 4750 * Send a command to the firmware. 4751 */ 4752 static int 4753 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 4754 { 4755 struct iwn_tx_ring *ring; 4756 struct iwn_tx_desc *desc; 4757 struct iwn_tx_data *data; 4758 struct iwn_tx_cmd *cmd; 4759 struct mbuf *m; 4760 bus_addr_t paddr; 4761 int totlen, error; 4762 int cmd_queue_num; 4763 4764 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4765 4766 if (async == 0) 4767 IWN_LOCK_ASSERT(sc); 4768 4769 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 4770 cmd_queue_num = IWN_PAN_CMD_QUEUE; 4771 else 4772 cmd_queue_num = IWN_CMD_QUEUE_NUM; 4773 4774 ring = &sc->txq[cmd_queue_num]; 4775 desc = &ring->desc[ring->cur]; 4776 data = &ring->data[ring->cur]; 4777 totlen = 4 + size; 4778 4779 if (size > sizeof cmd->data) { 4780 /* Command is too large to fit in a descriptor. */ 4781 if (totlen > MCLBYTES) 4782 return EINVAL; 4783 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 4784 if (m == NULL) 4785 return ENOMEM; 4786 cmd = mtod(m, struct iwn_tx_cmd *); 4787 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 4788 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 4789 if (error != 0) { 4790 m_freem(m); 4791 return error; 4792 } 4793 data->m = m; 4794 } else { 4795 cmd = &ring->cmd[ring->cur]; 4796 paddr = data->cmd_paddr; 4797 } 4798 4799 cmd->code = code; 4800 cmd->flags = 0; 4801 cmd->qid = ring->qid; 4802 cmd->idx = ring->cur; 4803 memcpy(cmd->data, buf, size); 4804 4805 desc->nsegs = 1; 4806 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 4807 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 4808 4809 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 4810 __func__, iwn_intr_str(cmd->code), cmd->code, 4811 cmd->flags, cmd->qid, cmd->idx); 4812 4813 if (size > sizeof cmd->data) { 4814 bus_dmamap_sync(ring->data_dmat, data->map, 4815 BUS_DMASYNC_PREWRITE); 4816 } else { 4817 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4818 BUS_DMASYNC_PREWRITE); 4819 } 4820 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4821 BUS_DMASYNC_PREWRITE); 4822 4823 /* Kick command ring. */ 4824 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4825 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4826 4827 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4828 4829 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 4830 } 4831 4832 static int 4833 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 4834 { 4835 struct iwn4965_node_info hnode; 4836 caddr_t src, dst; 4837 4838 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4839 4840 /* 4841 * We use the node structure for 5000 Series internally (it is 4842 * a superset of the one for 4965AGN). 
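 * (The 4965 layout simply lacks the 24-byte TSC / RX MIC / TX MIC block
 * that occupies offsets 48-71 of the 5000-series structure, hence the
 * split copy below: the first 48 bytes, then 20 more from offset 72.)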
We thus copy the common 4843 * fields before sending the command. 4844 */ 4845 src = (caddr_t)node; 4846 dst = (caddr_t)&hnode; 4847 memcpy(dst, src, 48); 4848 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 4849 memcpy(dst + 48, src + 72, 20); 4850 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 4851 } 4852 4853 static int 4854 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 4855 { 4856 4857 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4858 4859 /* Direct mapping. */ 4860 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 4861 } 4862 4863 static int 4864 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 4865 { 4866 #define RV(v) ((v) & IEEE80211_RATE_VAL) 4867 struct iwn_node *wn = (void *)ni; 4868 struct ieee80211_rateset *rs; 4869 struct iwn_cmd_link_quality linkq; 4870 uint8_t txant; 4871 int i, rate, txrate; 4872 int is_11n; 4873 4874 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4875 4876 /* Use the first valid TX antenna. */ 4877 txant = IWN_LSB(sc->txchainmask); 4878 4879 memset(&linkq, 0, sizeof linkq); 4880 linkq.id = wn->id; 4881 linkq.antmsk_1stream = txant; 4882 4883 /* 4884 * The '2 stream' setup is a bit .. odd. 4885 * 4886 * For NICs that support only 1 antenna, default to IWN_ANT_AB or 4887 * the firmware panics (eg Intel 5100.) 4888 * 4889 * For NICs that support two antennas, we use ANT_AB. 4890 * 4891 * For NICs that support three antennas, we use the two that 4892 * wasn't the default one. 4893 * 4894 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict 4895 * this to only one antenna. 4896 */ 4897 4898 /* So - if there's no secondary antenna, assume IWN_ANT_AB */ 4899 4900 /* Default - transmit on the other antennas */ 4901 linkq.antmsk_2stream = (sc->txchainmask & ~IWN_LSB(sc->txchainmask)); 4902 4903 /* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */ 4904 if (linkq.antmsk_2stream == 0) 4905 linkq.antmsk_2stream = IWN_ANT_AB; 4906 4907 /* 4908 * If the NIC is a two-stream TX NIC, configure the TX mask to 4909 * the default chainmask 4910 */ 4911 else if (sc->ntxchains == 2) 4912 linkq.antmsk_2stream = sc->txchainmask; 4913 4914 linkq.ampdu_max = 32; /* XXX negotiated? */ 4915 linkq.ampdu_threshold = 3; 4916 linkq.ampdu_limit = htole16(4000); /* 4ms */ 4917 4918 DPRINTF(sc, IWN_DEBUG_XMIT, 4919 "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n", 4920 __func__, 4921 linkq.antmsk_1stream, 4922 linkq.antmsk_2stream, 4923 sc->ntxchains); 4924 4925 /* 4926 * Are we using 11n rates? Ensure the channel is 4927 * 11n _and_ we have some 11n rates, or don't 4928 * try. 4929 */ 4930 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) { 4931 rs = (struct ieee80211_rateset *) &ni->ni_htrates; 4932 is_11n = 1; 4933 } else { 4934 rs = &ni->ni_rates; 4935 is_11n = 0; 4936 } 4937 4938 /* Start at highest available bit-rate. */ 4939 /* 4940 * XXX this is all very dirty! 
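 * For example, with an 11n peer the table ends up starting at the peer's
 * highest MCS and stepping down one rate per entry; once txrate reaches
 * zero the lowest rate simply repeats for the remaining slots.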
4941 */ 4942 if (is_11n) 4943 txrate = ni->ni_htrates.rs_nrates - 1; 4944 else 4945 txrate = rs->rs_nrates - 1; 4946 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 4947 uint32_t plcp; 4948 4949 if (is_11n) 4950 rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate]; 4951 else 4952 rate = RV(rs->rs_rates[txrate]); 4953 4954 DPRINTF(sc, IWN_DEBUG_XMIT, 4955 "%s: i=%d, txrate=%d, rate=0x%02x\n", 4956 __func__, 4957 i, 4958 txrate, 4959 rate); 4960 4961 /* Do rate -> PLCP config mapping */ 4962 plcp = iwn_rate_to_plcp(sc, ni, rate); 4963 linkq.retry[i] = plcp; 4964 4965 /* 4966 * The mimo field is an index into the table which 4967 * indicates the first index where it and subsequent entries 4968 * will not be using MIMO. 4969 * 4970 * Since we're filling linkq from 0..15 and we're filling 4971 * from the higest MCS rates to the lowest rates, if we 4972 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie, 4973 * the next entry.) That way if the next entry is a non-MIMO 4974 * entry, we're already pointing at it. 4975 */ 4976 if ((le32toh(plcp) & IWN_RFLAG_MCS) && 4977 RV(le32toh(plcp)) > 7) 4978 linkq.mimo = i + 1; 4979 4980 /* Next retry at immediate lower bit-rate. */ 4981 if (txrate > 0) 4982 txrate--; 4983 } 4984 4985 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4986 4987 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 4988 #undef RV 4989 } 4990 4991 /* 4992 * Broadcast node is used to send group-addressed and management frames. 4993 */ 4994 static int 4995 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 4996 { 4997 struct iwn_ops *ops = &sc->ops; 4998 struct ifnet *ifp = sc->sc_ifp; 4999 struct ieee80211com *ic = ifp->if_l2com; 5000 struct iwn_node_info node; 5001 struct iwn_cmd_link_quality linkq; 5002 uint8_t txant; 5003 int i, error; 5004 5005 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5006 5007 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 5008 5009 memset(&node, 0, sizeof node); 5010 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 5011 node.id = sc->broadcast_id; 5012 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); 5013 if ((error = ops->add_node(sc, &node, async)) != 0) 5014 return error; 5015 5016 /* Use the first valid TX antenna. */ 5017 txant = IWN_LSB(sc->txchainmask); 5018 5019 memset(&linkq, 0, sizeof linkq); 5020 linkq.id = sc->broadcast_id; 5021 linkq.antmsk_1stream = txant; 5022 linkq.antmsk_2stream = IWN_ANT_AB; 5023 linkq.ampdu_max = 64; 5024 linkq.ampdu_threshold = 3; 5025 linkq.ampdu_limit = htole16(4000); /* 4ms */ 5026 5027 /* Use lowest mandatory bit-rate. */ 5028 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) 5029 linkq.retry[0] = htole32(0xd); 5030 else 5031 linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK); 5032 linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant)); 5033 /* Use same bit-rate for all TX retries. 
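Unlike the per-node tables built in iwn_set_link_quality(), the broadcast node
gets no fallback chain: every retry entry repeats the lowest mandatory rate
and antenna chosen above.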
*/ 5034 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 5035 linkq.retry[i] = linkq.retry[0]; 5036 } 5037 5038 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5039 5040 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 5041 } 5042 5043 static int 5044 iwn_updateedca(struct ieee80211com *ic) 5045 { 5046 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 5047 struct iwn_softc *sc = ic->ic_ifp->if_softc; 5048 struct iwn_edca_params cmd; 5049 int aci; 5050 5051 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5052 5053 memset(&cmd, 0, sizeof cmd); 5054 cmd.flags = htole32(IWN_EDCA_UPDATE); 5055 for (aci = 0; aci < WME_NUM_AC; aci++) { 5056 const struct wmeParams *ac = 5057 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 5058 cmd.ac[aci].aifsn = ac->wmep_aifsn; 5059 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin)); 5060 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax)); 5061 cmd.ac[aci].txoplimit = 5062 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 5063 } 5064 IEEE80211_UNLOCK(ic); 5065 IWN_LOCK(sc); 5066 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 5067 IWN_UNLOCK(sc); 5068 IEEE80211_LOCK(ic); 5069 5070 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5071 5072 return 0; 5073 #undef IWN_EXP2 5074 } 5075 5076 static void 5077 iwn_update_mcast(struct ifnet *ifp) 5078 { 5079 /* Ignore */ 5080 } 5081 5082 static void 5083 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 5084 { 5085 struct iwn_cmd_led led; 5086 5087 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5088 5089 #if 0 5090 /* XXX don't set LEDs during scan? */ 5091 if (sc->sc_is_scanning) 5092 return; 5093 #endif 5094 5095 /* Clear microcode LED ownership. */ 5096 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 5097 5098 led.which = which; 5099 led.unit = htole32(10000); /* on/off in unit of 100ms */ 5100 led.off = off; 5101 led.on = on; 5102 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 5103 } 5104 5105 /* 5106 * Set the critical temperature at which the firmware will stop the radio 5107 * and notify us. 5108 */ 5109 static int 5110 iwn_set_critical_temp(struct iwn_softc *sc) 5111 { 5112 struct iwn_critical_temp crit; 5113 int32_t temp; 5114 5115 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5116 5117 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 5118 5119 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 5120 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 5121 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 5122 temp = IWN_CTOK(110); 5123 else 5124 temp = 110; 5125 memset(&crit, 0, sizeof crit); 5126 crit.tempR = htole32(temp); 5127 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp); 5128 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 5129 } 5130 5131 static int 5132 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 5133 { 5134 struct iwn_cmd_timing cmd; 5135 uint64_t val, mod; 5136 5137 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5138 5139 memset(&cmd, 0, sizeof cmd); 5140 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 5141 cmd.bintval = htole16(ni->ni_intval); 5142 cmd.lintval = htole16(10); 5143 5144 /* Compute remaining time until next beacon. 
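With a beacon interval of 100 TU this makes val 102400 microseconds; a
(made-up) TSF of 1030000 then gives mod = 6000, so the firmware is told to
wait 96400 microseconds for the next beacon.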
*/ 5145 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 5146 mod = le64toh(cmd.tstamp) % val; 5147 cmd.binitval = htole32((uint32_t)(val - mod)); 5148 5149 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 5150 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 5151 5152 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 5153 } 5154 5155 static void 5156 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 5157 { 5158 struct ifnet *ifp = sc->sc_ifp; 5159 struct ieee80211com *ic = ifp->if_l2com; 5160 5161 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5162 5163 /* Adjust TX power if need be (delta >= 3 degC). */ 5164 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 5165 __func__, sc->temp, temp); 5166 if (abs(temp - sc->temp) >= 3) { 5167 /* Record temperature of last calibration. */ 5168 sc->temp = temp; 5169 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 5170 } 5171 } 5172 5173 /* 5174 * Set TX power for current channel (each rate has its own power settings). 5175 * This function takes into account the regulatory information from EEPROM, 5176 * the current temperature and the current voltage. 5177 */ 5178 static int 5179 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 5180 int async) 5181 { 5182 /* Fixed-point arithmetic division using a n-bit fractional part. */ 5183 #define fdivround(a, b, n) \ 5184 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 5185 /* Linear interpolation. */ 5186 #define interpolate(x, x1, y1, x2, y2, n) \ 5187 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 5188 5189 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 5190 struct iwn_ucode_info *uc = &sc->ucode_info; 5191 struct iwn4965_cmd_txpower cmd; 5192 struct iwn4965_eeprom_chan_samples *chans; 5193 const uint8_t *rf_gain, *dsp_gain; 5194 int32_t vdiff, tdiff; 5195 int i, c, grp, maxpwr; 5196 uint8_t chan; 5197 5198 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 5199 /* Retrieve current channel from last RXON. */ 5200 chan = sc->rxon->chan; 5201 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", 5202 chan); 5203 5204 memset(&cmd, 0, sizeof cmd); 5205 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 5206 cmd.chan = chan; 5207 5208 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 5209 maxpwr = sc->maxpwr5GHz; 5210 rf_gain = iwn4965_rf_gain_5ghz; 5211 dsp_gain = iwn4965_dsp_gain_5ghz; 5212 } else { 5213 maxpwr = sc->maxpwr2GHz; 5214 rf_gain = iwn4965_rf_gain_2ghz; 5215 dsp_gain = iwn4965_dsp_gain_2ghz; 5216 } 5217 5218 /* Compute voltage compensation. */ 5219 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 5220 if (vdiff > 0) 5221 vdiff *= 2; 5222 if (abs(vdiff) > 2) 5223 vdiff = 0; 5224 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5225 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 5226 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 5227 5228 /* Get channel attenuation group. */ 5229 if (chan <= 20) /* 1-20 */ 5230 grp = 4; 5231 else if (chan <= 43) /* 34-43 */ 5232 grp = 0; 5233 else if (chan <= 70) /* 44-70 */ 5234 grp = 1; 5235 else if (chan <= 124) /* 71-124 */ 5236 grp = 2; 5237 else /* 125-200 */ 5238 grp = 3; 5239 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5240 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 5241 5242 /* Get channel sub-band. 
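The two reference channels recorded for this sub-band feed the interpolate()
calls below; with made-up sample values, interpolate(40, 36, 100, 44, 120, 1)
= 100 + fdivround(4 * 20, 8, 1) = 110.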
*/ 5243 for (i = 0; i < IWN_NBANDS; i++) 5244 if (sc->bands[i].lo != 0 && 5245 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 5246 break; 5247 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 5248 return EINVAL; 5249 chans = sc->bands[i].chans; 5250 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5251 "%s: chan %d sub-band=%d\n", __func__, chan, i); 5252 5253 for (c = 0; c < 2; c++) { 5254 uint8_t power, gain, temp; 5255 int maxchpwr, pwr, ridx, idx; 5256 5257 power = interpolate(chan, 5258 chans[0].num, chans[0].samples[c][1].power, 5259 chans[1].num, chans[1].samples[c][1].power, 1); 5260 gain = interpolate(chan, 5261 chans[0].num, chans[0].samples[c][1].gain, 5262 chans[1].num, chans[1].samples[c][1].gain, 1); 5263 temp = interpolate(chan, 5264 chans[0].num, chans[0].samples[c][1].temp, 5265 chans[1].num, chans[1].samples[c][1].temp, 1); 5266 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5267 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 5268 __func__, c, power, gain, temp); 5269 5270 /* Compute temperature compensation. */ 5271 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 5272 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5273 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 5274 __func__, tdiff, sc->temp, temp); 5275 5276 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 5277 /* Convert dBm to half-dBm. */ 5278 maxchpwr = sc->maxpwr[chan] * 2; 5279 if ((ridx / 8) & 1) 5280 maxchpwr -= 6; /* MIMO 2T: -3dB */ 5281 5282 pwr = maxpwr; 5283 5284 /* Adjust TX power based on rate. */ 5285 if ((ridx % 8) == 5) 5286 pwr -= 15; /* OFDM48: -7.5dB */ 5287 else if ((ridx % 8) == 6) 5288 pwr -= 17; /* OFDM54: -8.5dB */ 5289 else if ((ridx % 8) == 7) 5290 pwr -= 20; /* OFDM60: -10dB */ 5291 else 5292 pwr -= 10; /* Others: -5dB */ 5293 5294 /* Do not exceed channel max TX power. */ 5295 if (pwr > maxchpwr) 5296 pwr = maxchpwr; 5297 5298 idx = gain - (pwr - power) - tdiff - vdiff; 5299 if ((ridx / 8) & 1) /* MIMO */ 5300 idx += (int32_t)le32toh(uc->atten[grp][c]); 5301 5302 if (cmd.band == 0) 5303 idx += 9; /* 5GHz */ 5304 if (ridx == IWN_RIDX_MAX) 5305 idx += 5; /* CCK */ 5306 5307 /* Make sure idx stays in a valid range. */ 5308 if (idx < 0) 5309 idx = 0; 5310 else if (idx > IWN4965_MAX_PWR_INDEX) 5311 idx = IWN4965_MAX_PWR_INDEX; 5312 5313 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5314 "%s: Tx chain %d, rate idx %d: power=%d\n", 5315 __func__, c, ridx, idx); 5316 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 5317 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 5318 } 5319 } 5320 5321 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5322 "%s: set tx power for chan %d\n", __func__, chan); 5323 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 5324 5325 #undef interpolate 5326 #undef fdivround 5327 } 5328 5329 static int 5330 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 5331 int async) 5332 { 5333 struct iwn5000_cmd_txpower cmd; 5334 5335 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5336 5337 /* 5338 * TX power calibration is handled automatically by the firmware 5339 * for 5000 Series. 5340 */ 5341 memset(&cmd, 0, sizeof cmd); 5342 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 5343 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 5344 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 5345 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); 5346 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 5347 } 5348 5349 /* 5350 * Retrieve the maximum RSSI (in dBm) among receivers. 
5351 */ 5352 static int 5353 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 5354 { 5355 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 5356 uint8_t mask, agc; 5357 int rssi; 5358 5359 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5360 5361 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 5362 agc = (le16toh(phy->agc) >> 7) & 0x7f; 5363 5364 rssi = 0; 5365 if (mask & IWN_ANT_A) 5366 rssi = MAX(rssi, phy->rssi[0]); 5367 if (mask & IWN_ANT_B) 5368 rssi = MAX(rssi, phy->rssi[2]); 5369 if (mask & IWN_ANT_C) 5370 rssi = MAX(rssi, phy->rssi[4]); 5371 5372 DPRINTF(sc, IWN_DEBUG_RECV, 5373 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc, 5374 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4], 5375 rssi - agc - IWN_RSSI_TO_DBM); 5376 return rssi - agc - IWN_RSSI_TO_DBM; 5377 } 5378 5379 static int 5380 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 5381 { 5382 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 5383 uint8_t agc; 5384 int rssi; 5385 5386 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5387 5388 agc = (le32toh(phy->agc) >> 9) & 0x7f; 5389 5390 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 5391 le16toh(phy->rssi[1]) & 0xff); 5392 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 5393 5394 DPRINTF(sc, IWN_DEBUG_RECV, 5395 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc, 5396 phy->rssi[0], phy->rssi[1], phy->rssi[2], 5397 rssi - agc - IWN_RSSI_TO_DBM); 5398 return rssi - agc - IWN_RSSI_TO_DBM; 5399 } 5400 5401 /* 5402 * Retrieve the average noise (in dBm) among receivers. 5403 */ 5404 static int 5405 iwn_get_noise(const struct iwn_rx_general_stats *stats) 5406 { 5407 int i, total, nbant, noise; 5408 5409 total = nbant = 0; 5410 for (i = 0; i < 3; i++) { 5411 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 5412 continue; 5413 total += noise; 5414 nbant++; 5415 } 5416 /* There should be at least one antenna but check anyway. */ 5417 return (nbant == 0) ? -127 : (total / nbant) - 107; 5418 } 5419 5420 /* 5421 * Compute temperature (in degC) from last received statistics. 5422 */ 5423 static int 5424 iwn4965_get_temperature(struct iwn_softc *sc) 5425 { 5426 struct iwn_ucode_info *uc = &sc->ucode_info; 5427 int32_t r1, r2, r3, r4, temp; 5428 5429 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5430 5431 r1 = le32toh(uc->temp[0].chan20MHz); 5432 r2 = le32toh(uc->temp[1].chan20MHz); 5433 r3 = le32toh(uc->temp[2].chan20MHz); 5434 r4 = le32toh(sc->rawtemp); 5435 5436 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 5437 return 0; 5438 5439 /* Sign-extend 23-bit R4 value to 32-bit. */ 5440 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 5441 /* Compute temperature in Kelvin. */ 5442 temp = (259 * (r4 - r2)) / (r3 - r1); 5443 temp = (temp * 97) / 100 + 8; 5444 5445 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, 5446 IWN_KTOC(temp)); 5447 return IWN_KTOC(temp); 5448 } 5449 5450 static int 5451 iwn5000_get_temperature(struct iwn_softc *sc) 5452 { 5453 int32_t temp; 5454 5455 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5456 5457 /* 5458 * Temperature is not used by the driver for 5000 Series because 5459 * TX power calibration is handled by firmware. 5460 */ 5461 temp = le32toh(sc->rawtemp); 5462 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 5463 temp = (temp / -5) + sc->temp_off; 5464 temp = IWN_KTOC(temp); 5465 } 5466 return temp; 5467 } 5468 5469 /* 5470 * Initialize sensitivity calibration state machine. 
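 * This resets the calibration state, loads the chip-specific minimum
 * correlation thresholds, pushes the initial differential gains and then
 * asks the firmware for statistics at every beacon interval, which in turn
 * drive the noise collection and sensitivity tuning routines below.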
5471 */ 5472 static int 5473 iwn_init_sensitivity(struct iwn_softc *sc) 5474 { 5475 struct iwn_ops *ops = &sc->ops; 5476 struct iwn_calib_state *calib = &sc->calib; 5477 uint32_t flags; 5478 int error; 5479 5480 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5481 5482 /* Reset calibration state machine. */ 5483 memset(calib, 0, sizeof (*calib)); 5484 calib->state = IWN_CALIB_STATE_INIT; 5485 calib->cck_state = IWN_CCK_STATE_HIFA; 5486 /* Set initial correlation values. */ 5487 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 5488 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 5489 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 5490 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 5491 calib->cck_x4 = 125; 5492 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 5493 calib->energy_cck = sc->limits->energy_cck; 5494 5495 /* Write initial sensitivity. */ 5496 if ((error = iwn_send_sensitivity(sc)) != 0) 5497 return error; 5498 5499 /* Write initial gains. */ 5500 if ((error = ops->init_gains(sc)) != 0) 5501 return error; 5502 5503 /* Request statistics at each beacon interval. */ 5504 flags = 0; 5505 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n", 5506 __func__); 5507 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 5508 } 5509 5510 /* 5511 * Collect noise and RSSI statistics for the first 20 beacons received 5512 * after association and use them to determine connected antennas and 5513 * to set differential gains. 5514 */ 5515 static void 5516 iwn_collect_noise(struct iwn_softc *sc, 5517 const struct iwn_rx_general_stats *stats) 5518 { 5519 struct iwn_ops *ops = &sc->ops; 5520 struct iwn_calib_state *calib = &sc->calib; 5521 struct ifnet *ifp = sc->sc_ifp; 5522 struct ieee80211com *ic = ifp->if_l2com; 5523 uint32_t val; 5524 int i; 5525 5526 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5527 5528 /* Accumulate RSSI and noise for all 3 antennas. */ 5529 for (i = 0; i < 3; i++) { 5530 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 5531 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 5532 } 5533 /* NB: We update differential gains only once after 20 beacons. */ 5534 if (++calib->nbeacons < 20) 5535 return; 5536 5537 /* Determine highest average RSSI. */ 5538 val = MAX(calib->rssi[0], calib->rssi[1]); 5539 val = MAX(calib->rssi[2], val); 5540 5541 /* Determine which antennas are connected. */ 5542 sc->chainmask = sc->rxchainmask; 5543 for (i = 0; i < 3; i++) 5544 if (val - calib->rssi[i] > 15 * 20) 5545 sc->chainmask &= ~(1 << i); 5546 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5547 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", 5548 __func__, sc->rxchainmask, sc->chainmask); 5549 5550 /* If none of the TX antennas are connected, keep at least one. */ 5551 if ((sc->chainmask & sc->txchainmask) == 0) 5552 sc->chainmask |= IWN_LSB(sc->txchainmask); 5553 5554 (void)ops->set_gains(sc); 5555 calib->state = IWN_CALIB_STATE_RUN; 5556 5557 #ifdef notyet 5558 /* XXX Disable RX chains with no antennas connected. */ 5559 sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 5560 if (sc->sc_is_scanning) 5561 device_printf(sc->sc_dev, 5562 "%s: is_scanning set, before RXON\n", 5563 __func__); 5564 (void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 5565 #endif 5566 5567 /* Enable power-saving mode if requested by user. 
*/ 5568 if (ic->ic_flags & IEEE80211_F_PMGTON) 5569 (void)iwn_set_pslevel(sc, 0, 3, 1); 5570 5571 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5572 5573 } 5574 5575 static int 5576 iwn4965_init_gains(struct iwn_softc *sc) 5577 { 5578 struct iwn_phy_calib_gain cmd; 5579 5580 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5581 5582 memset(&cmd, 0, sizeof cmd); 5583 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 5584 /* Differential gains initially set to 0 for all 3 antennas. */ 5585 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5586 "%s: setting initial differential gains\n", __func__); 5587 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5588 } 5589 5590 static int 5591 iwn5000_init_gains(struct iwn_softc *sc) 5592 { 5593 struct iwn_phy_calib cmd; 5594 5595 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5596 5597 memset(&cmd, 0, sizeof cmd); 5598 cmd.code = sc->reset_noise_gain; 5599 cmd.ngroups = 1; 5600 cmd.isvalid = 1; 5601 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5602 "%s: setting initial differential gains\n", __func__); 5603 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5604 } 5605 5606 static int 5607 iwn4965_set_gains(struct iwn_softc *sc) 5608 { 5609 struct iwn_calib_state *calib = &sc->calib; 5610 struct iwn_phy_calib_gain cmd; 5611 int i, delta, noise; 5612 5613 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5614 5615 /* Get minimal noise among connected antennas. */ 5616 noise = INT_MAX; /* NB: There's at least one antenna. */ 5617 for (i = 0; i < 3; i++) 5618 if (sc->chainmask & (1 << i)) 5619 noise = MIN(calib->noise[i], noise); 5620 5621 memset(&cmd, 0, sizeof cmd); 5622 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 5623 /* Set differential gains for connected antennas. */ 5624 for (i = 0; i < 3; i++) { 5625 if (sc->chainmask & (1 << i)) { 5626 /* Compute attenuation (in unit of 1.5dB). */ 5627 delta = (noise - (int32_t)calib->noise[i]) / 30; 5628 /* NB: delta <= 0 */ 5629 /* Limit to [-4.5dB,0]. */ 5630 cmd.gain[i] = MIN(abs(delta), 3); 5631 if (delta < 0) 5632 cmd.gain[i] |= 1 << 2; /* sign bit */ 5633 } 5634 } 5635 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5636 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 5637 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); 5638 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5639 } 5640 5641 static int 5642 iwn5000_set_gains(struct iwn_softc *sc) 5643 { 5644 struct iwn_calib_state *calib = &sc->calib; 5645 struct iwn_phy_calib_gain cmd; 5646 int i, ant, div, delta; 5647 5648 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5649 5650 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 5651 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 5652 5653 memset(&cmd, 0, sizeof cmd); 5654 cmd.code = sc->noise_gain; 5655 cmd.ngroups = 1; 5656 cmd.isvalid = 1; 5657 /* Get first available RX antenna as referential. */ 5658 ant = IWN_LSB(sc->rxchainmask); 5659 /* Set differential gains for other antennas. */ 5660 for (i = ant + 1; i < 3; i++) { 5661 if (sc->chainmask & (1 << i)) { 5662 /* The delta is relative to antenna "ant". */ 5663 delta = ((int32_t)calib->noise[ant] - 5664 (int32_t)calib->noise[i]) / div; 5665 /* Limit to [-4.5dB,+4.5dB]. 
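The gain fields are encoded in 1.5 dB steps with bit 2 acting as the sign, so
a delta of -2 becomes 2 | (1 << 2) = 6, i.e. a 3 dB adjustment relative to the
reference antenna.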
*/ 5666 cmd.gain[i - 1] = MIN(abs(delta), 3); 5667 if (delta < 0) 5668 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 5669 } 5670 } 5671 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5672 "setting differential gains Ant B/C: %x/%x (%x)\n", 5673 cmd.gain[0], cmd.gain[1], sc->chainmask); 5674 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5675 } 5676 5677 /* 5678 * Tune RF RX sensitivity based on the number of false alarms detected 5679 * during the last beacon period. 5680 */ 5681 static void 5682 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 5683 { 5684 #define inc(val, inc, max) \ 5685 if ((val) < (max)) { \ 5686 if ((val) < (max) - (inc)) \ 5687 (val) += (inc); \ 5688 else \ 5689 (val) = (max); \ 5690 needs_update = 1; \ 5691 } 5692 #define dec(val, dec, min) \ 5693 if ((val) > (min)) { \ 5694 if ((val) > (min) + (dec)) \ 5695 (val) -= (dec); \ 5696 else \ 5697 (val) = (min); \ 5698 needs_update = 1; \ 5699 } 5700 5701 const struct iwn_sensitivity_limits *limits = sc->limits; 5702 struct iwn_calib_state *calib = &sc->calib; 5703 uint32_t val, rxena, fa; 5704 uint32_t energy[3], energy_min; 5705 uint8_t noise[3], noise_ref; 5706 int i, needs_update = 0; 5707 5708 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5709 5710 /* Check that we've been enabled long enough. */ 5711 if ((rxena = le32toh(stats->general.load)) == 0){ 5712 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__); 5713 return; 5714 } 5715 5716 /* Compute number of false alarms since last call for OFDM. */ 5717 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 5718 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 5719 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5720 5721 if (fa > 50 * rxena) { 5722 /* High false alarm count, decrease sensitivity. */ 5723 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5724 "%s: OFDM high false alarm count: %u\n", __func__, fa); 5725 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 5726 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 5727 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 5728 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 5729 5730 } else if (fa < 5 * rxena) { 5731 /* Low false alarm count, increase sensitivity. */ 5732 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5733 "%s: OFDM low false alarm count: %u\n", __func__, fa); 5734 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 5735 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 5736 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 5737 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 5738 } 5739 5740 /* Compute maximum noise among 3 receivers. */ 5741 for (i = 0; i < 3; i++) 5742 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 5743 val = MAX(noise[0], noise[1]); 5744 val = MAX(noise[2], val); 5745 /* Insert it into our samples table. */ 5746 calib->noise_samples[calib->cur_noise_sample] = val; 5747 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 5748 5749 /* Compute maximum noise among last 20 samples. */ 5750 noise_ref = calib->noise_samples[0]; 5751 for (i = 1; i < 20; i++) 5752 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 5753 5754 /* Compute maximum energy among 3 receivers. */ 5755 for (i = 0; i < 3; i++) 5756 energy[i] = le32toh(stats->general.energy[i]); 5757 val = MIN(energy[0], energy[1]); 5758 val = MIN(energy[2], val); 5759 /* Insert it into our samples table. */ 5760 calib->energy_samples[calib->cur_energy_sample] = val; 5761 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 5762 5763 /* Compute minimum energy among last 10 samples. 
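The false-alarm test applied to the OFDM counters above (and to the CCK
counters further below) compares a scaled alarm count against the time the
receiver was actually listening: the count is multiplied by 200 TU and then
checked against 50x and 5x the "load" value, which amounts to "too many"
and "too few" false alarms per unit of listen time.  A compact restatement,
assuming IEEE80211_DUR_TU is the usual 1024 microsecond time unit; the
helper name is illustrative:

#include <stdint.h>

// Returns +1 when sensitivity should be reduced (too many false alarms),
// -1 when it may be increased (too few), 0 when it should be left alone.
static int
fa_rate_decision(uint32_t fa_count, uint32_t load)
{
	uint64_t scaled = (uint64_t)fa_count * 200 * 1024;

	if (scaled > (uint64_t)50 * load)
		return 1;
	if (scaled < (uint64_t)5 * load)
		return -1;
	return 0;
}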
*/ 5764 energy_min = calib->energy_samples[0]; 5765 for (i = 1; i < 10; i++) 5766 energy_min = MAX(energy_min, calib->energy_samples[i]); 5767 energy_min += 6; 5768 5769 /* Compute number of false alarms since last call for CCK. */ 5770 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 5771 fa += le32toh(stats->cck.fa) - calib->fa_cck; 5772 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5773 5774 if (fa > 50 * rxena) { 5775 /* High false alarm count, decrease sensitivity. */ 5776 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5777 "%s: CCK high false alarm count: %u\n", __func__, fa); 5778 calib->cck_state = IWN_CCK_STATE_HIFA; 5779 calib->low_fa = 0; 5780 5781 if (calib->cck_x4 > 160) { 5782 calib->noise_ref = noise_ref; 5783 if (calib->energy_cck > 2) 5784 dec(calib->energy_cck, 2, energy_min); 5785 } 5786 if (calib->cck_x4 < 160) { 5787 calib->cck_x4 = 161; 5788 needs_update = 1; 5789 } else 5790 inc(calib->cck_x4, 3, limits->max_cck_x4); 5791 5792 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 5793 5794 } else if (fa < 5 * rxena) { 5795 /* Low false alarm count, increase sensitivity. */ 5796 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5797 "%s: CCK low false alarm count: %u\n", __func__, fa); 5798 calib->cck_state = IWN_CCK_STATE_LOFA; 5799 calib->low_fa++; 5800 5801 if (calib->cck_state != IWN_CCK_STATE_INIT && 5802 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 5803 calib->low_fa > 100)) { 5804 inc(calib->energy_cck, 2, limits->min_energy_cck); 5805 dec(calib->cck_x4, 3, limits->min_cck_x4); 5806 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 5807 } 5808 } else { 5809 /* Not worth to increase or decrease sensitivity. */ 5810 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5811 "%s: CCK normal false alarm count: %u\n", __func__, fa); 5812 calib->low_fa = 0; 5813 calib->noise_ref = noise_ref; 5814 5815 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 5816 /* Previous interval had many false alarms. */ 5817 dec(calib->energy_cck, 8, energy_min); 5818 } 5819 calib->cck_state = IWN_CCK_STATE_INIT; 5820 } 5821 5822 if (needs_update) 5823 (void)iwn_send_sensitivity(sc); 5824 5825 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5826 5827 #undef dec 5828 #undef inc 5829 } 5830 5831 static int 5832 iwn_send_sensitivity(struct iwn_softc *sc) 5833 { 5834 struct iwn_calib_state *calib = &sc->calib; 5835 struct iwn_enhanced_sensitivity_cmd cmd; 5836 int len; 5837 5838 memset(&cmd, 0, sizeof cmd); 5839 len = sizeof (struct iwn_sensitivity_cmd); 5840 cmd.which = IWN_SENSITIVITY_WORKTBL; 5841 /* OFDM modulation. */ 5842 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 5843 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 5844 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 5845 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 5846 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 5847 cmd.energy_ofdm_th = htole16(62); 5848 /* CCK modulation. */ 5849 cmd.corr_cck_x4 = htole16(calib->cck_x4); 5850 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 5851 cmd.energy_cck = htole16(calib->energy_cck); 5852 /* Barker modulation: use default values. */ 5853 cmd.corr_barker = htole16(190); 5854 cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc); 5855 5856 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5857 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 5858 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 5859 calib->ofdm_mrc_x4, calib->cck_x4, 5860 calib->cck_mrc_x4, calib->energy_cck); 5861 5862 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 5863 goto send; 5864 /* Enhanced sensitivity settings. 
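The command built above is always laid out as the larger
iwn_enhanced_sensitivity_cmd, but its length doubles as a feature switch:
on NICs without IWN_FLAG_ENH_SENS only the leading iwn_sensitivity_cmd bytes
are sent, and the extra fields below are filled in (and the length grown)
only when enhanced sensitivity is supported.  A minimal sketch of that
"send a prefix of the struct" pattern, with made-up structures and a
callback standing in for iwn_cmd:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct sens_base {
	uint16_t which;
	uint16_t corr_ofdm_x1;
};

struct sens_full {		// same leading layout as sens_base
	uint16_t which;
	uint16_t corr_ofdm_x1;
	uint16_t det_slope;	// enhanced-only field
};

static int
send_sensitivity_like(int has_enhanced, int (*send)(const void *, size_t))
{
	struct sens_full cmd;
	size_t len = sizeof(struct sens_base);

	memset(&cmd, 0, sizeof cmd);
	cmd.which = 1;
	cmd.corr_ofdm_x1 = 105;
	if (has_enhanced) {
		cmd.det_slope = 668;
		len = sizeof cmd;	// grow to the full layout
	}
	return send(&cmd, len);
}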
*/ 5865 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 5866 cmd.ofdm_det_slope_mrc = htole16(668); 5867 cmd.ofdm_det_icept_mrc = htole16(4); 5868 cmd.ofdm_det_slope = htole16(486); 5869 cmd.ofdm_det_icept = htole16(37); 5870 cmd.cck_det_slope_mrc = htole16(853); 5871 cmd.cck_det_icept_mrc = htole16(4); 5872 cmd.cck_det_slope = htole16(476); 5873 cmd.cck_det_icept = htole16(99); 5874 send: 5875 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 5876 } 5877 5878 /* 5879 * Look at the increase of PLCP errors over time; if it exceeds 5880 * a programmed threshold then trigger an RF retune. 5881 */ 5882 static void 5883 iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs) 5884 { 5885 int32_t delta_ofdm, delta_ht, delta_cck; 5886 struct iwn_calib_state *calib = &sc->calib; 5887 int delta_ticks, cur_ticks; 5888 int delta_msec; 5889 int thresh; 5890 5891 /* 5892 * Calculate the difference between the current and 5893 * previous statistics. 5894 */ 5895 delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck; 5896 delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm; 5897 delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht; 5898 5899 /* 5900 * Calculate the delta in time between successive statistics 5901 * messages. Yes, it can roll over; so we make sure that 5902 * this doesn't happen. 5903 * 5904 * XXX go figure out what to do about rollover 5905 * XXX go figure out what to do if ticks rolls over to -ve instead! 5906 * XXX go stab signed integer overflow undefined-ness in the face. 5907 */ 5908 cur_ticks = ticks; 5909 delta_ticks = cur_ticks - sc->last_calib_ticks; 5910 5911 /* 5912 * If any are negative, then the firmware likely reset; so just 5913 * bail. We'll pick this up next time. 5914 */ 5915 if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0) 5916 return; 5917 5918 /* 5919 * delta_ticks is in ticks; we need to convert it up to milliseconds 5920 * so we can do some useful math with it. 5921 */ 5922 delta_msec = ticks_to_msecs(delta_ticks); 5923 5924 /* 5925 * Calculate what our threshold is given the current delta_msec. 5926 */ 5927 thresh = sc->base_params->plcp_err_threshold * delta_msec; 5928 5929 DPRINTF(sc, IWN_DEBUG_STATE, 5930 "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n", 5931 __func__, 5932 delta_msec, 5933 delta_cck, 5934 delta_ofdm, 5935 delta_ht, 5936 (delta_msec + delta_cck + delta_ofdm + delta_ht), 5937 thresh); 5938 5939 /* 5940 * If we need a retune, then schedule a single channel scan 5941 * to a channel that isn't the currently active one! 5942 * 5943 * The math from linux iwlwifi: 5944 * 5945 * if ((delta * 100 / msecs) > threshold) 5946 */ 5947 if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) { 5948 DPRINTF(sc, IWN_DEBUG_ANY, 5949 "%s: PLCP error threshold raw (%d) comparison (%d) " 5950 "over limit (%d); retune!\n", 5951 __func__, 5952 (delta_cck + delta_ofdm + delta_ht), 5953 (delta_cck + delta_ofdm + delta_ht) * 100, 5954 thresh); 5955 } 5956 } 5957 5958 /* 5959 * Set STA mode power saving level (between 0 and 5). 5960 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 
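Before the implementation below, a note on how the sleep intervals are
capped (a DTIM count of zero is first normalized to one with skipping
disabled): when the firmware may sleep across DTIM periods, the longest
interval is either dtim * (skip_dtim + 1), or, if the power table supplies
its own maximum (intval[4] != -1), that maximum rounded down to a whole
number of DTIM periods; with no DTIM skipping the cap is simply one DTIM
period.  Restated as a standalone helper with illustrative names:

#include <stdint.h>

static uint32_t
max_sleep_interval(uint32_t dtim, uint32_t skip_dtim, uint32_t table_max)
{
	if (skip_dtim == 0)
		return dtim;		// never sleep past a DTIM beacon
	if (table_max == (uint32_t)-1)
		return dtim * (skip_dtim + 1);
	if (table_max > dtim)
		return (table_max / dtim) * dtim;  // align to DTIM multiples
	return table_max;
}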
5961 */ 5962 static int 5963 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 5964 { 5965 struct iwn_pmgt_cmd cmd; 5966 const struct iwn_pmgt *pmgt; 5967 uint32_t max, skip_dtim; 5968 uint32_t reg; 5969 int i; 5970 5971 DPRINTF(sc, IWN_DEBUG_PWRSAVE, 5972 "%s: dtim=%d, level=%d, async=%d\n", 5973 __func__, 5974 dtim, 5975 level, 5976 async); 5977 5978 /* Select which PS parameters to use. */ 5979 if (dtim <= 2) 5980 pmgt = &iwn_pmgt[0][level]; 5981 else if (dtim <= 10) 5982 pmgt = &iwn_pmgt[1][level]; 5983 else 5984 pmgt = &iwn_pmgt[2][level]; 5985 5986 memset(&cmd, 0, sizeof cmd); 5987 if (level != 0) /* not CAM */ 5988 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 5989 if (level == 5) 5990 cmd.flags |= htole16(IWN_PS_FAST_PD); 5991 /* Retrieve PCIe Active State Power Management (ASPM). */ 5992 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5993 if (!(reg & 0x1)) /* L0s Entry disabled. */ 5994 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 5995 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 5996 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 5997 5998 if (dtim == 0) { 5999 dtim = 1; 6000 skip_dtim = 0; 6001 } else 6002 skip_dtim = pmgt->skip_dtim; 6003 if (skip_dtim != 0) { 6004 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 6005 max = pmgt->intval[4]; 6006 if (max == (uint32_t)-1) 6007 max = dtim * (skip_dtim + 1); 6008 else if (max > dtim) 6009 max = (max / dtim) * dtim; 6010 } else 6011 max = dtim; 6012 for (i = 0; i < 5; i++) 6013 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 6014 6015 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 6016 level); 6017 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 6018 } 6019 6020 static int 6021 iwn_send_btcoex(struct iwn_softc *sc) 6022 { 6023 struct iwn_bluetooth cmd; 6024 6025 memset(&cmd, 0, sizeof cmd); 6026 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 6027 cmd.lead_time = IWN_BT_LEAD_TIME_DEF; 6028 cmd.max_kill = IWN_BT_MAX_KILL_DEF; 6029 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 6030 __func__); 6031 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 6032 } 6033 6034 static int 6035 iwn_send_advanced_btcoex(struct iwn_softc *sc) 6036 { 6037 static const uint32_t btcoex_3wire[12] = { 6038 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 6039 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 6040 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, 6041 }; 6042 struct iwn6000_btcoex_config btconfig; 6043 struct iwn2000_btcoex_config btconfig2k; 6044 struct iwn_btcoex_priotable btprio; 6045 struct iwn_btcoex_prot btprot; 6046 int error, i; 6047 uint8_t flags; 6048 6049 memset(&btconfig, 0, sizeof btconfig); 6050 memset(&btconfig2k, 0, sizeof btconfig2k); 6051 6052 flags = IWN_BT_FLAG_COEX6000_MODE_3W << 6053 IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2 6054 6055 if (sc->base_params->bt_sco_disable) 6056 flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE; 6057 else 6058 flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE; 6059 6060 flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION; 6061 6062 /* Default flags result is 145 as old value */ 6063 6064 /* 6065 * Flags value has to be review. 
Values must change if we 6066 * which to disable it 6067 */ 6068 if (sc->base_params->bt_session_2) { 6069 btconfig2k.flags = flags; 6070 btconfig2k.max_kill = 5; 6071 btconfig2k.bt3_t7_timer = 1; 6072 btconfig2k.kill_ack = htole32(0xffff0000); 6073 btconfig2k.kill_cts = htole32(0xffff0000); 6074 btconfig2k.sample_time = 2; 6075 btconfig2k.bt3_t2_timer = 0xc; 6076 6077 for (i = 0; i < 12; i++) 6078 btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]); 6079 btconfig2k.valid = htole16(0xff); 6080 btconfig2k.prio_boost = htole32(0xf0); 6081 DPRINTF(sc, IWN_DEBUG_RESET, 6082 "%s: configuring advanced bluetooth coexistence" 6083 " session 2, flags : 0x%x\n", 6084 __func__, 6085 flags); 6086 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k, 6087 sizeof(btconfig2k), 1); 6088 } else { 6089 btconfig.flags = flags; 6090 btconfig.max_kill = 5; 6091 btconfig.bt3_t7_timer = 1; 6092 btconfig.kill_ack = htole32(0xffff0000); 6093 btconfig.kill_cts = htole32(0xffff0000); 6094 btconfig.sample_time = 2; 6095 btconfig.bt3_t2_timer = 0xc; 6096 6097 for (i = 0; i < 12; i++) 6098 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 6099 btconfig.valid = htole16(0xff); 6100 btconfig.prio_boost = 0xf0; 6101 DPRINTF(sc, IWN_DEBUG_RESET, 6102 "%s: configuring advanced bluetooth coexistence," 6103 " flags : 0x%x\n", 6104 __func__, 6105 flags); 6106 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 6107 sizeof(btconfig), 1); 6108 } 6109 6110 if (error != 0) 6111 return error; 6112 6113 memset(&btprio, 0, sizeof btprio); 6114 btprio.calib_init1 = 0x6; 6115 btprio.calib_init2 = 0x7; 6116 btprio.calib_periodic_low1 = 0x2; 6117 btprio.calib_periodic_low2 = 0x3; 6118 btprio.calib_periodic_high1 = 0x4; 6119 btprio.calib_periodic_high2 = 0x5; 6120 btprio.dtim = 0x6; 6121 btprio.scan52 = 0x8; 6122 btprio.scan24 = 0xa; 6123 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 6124 1); 6125 if (error != 0) 6126 return error; 6127 6128 /* Force BT state machine change. */ 6129 memset(&btprot, 0, sizeof btprot); 6130 btprot.open = 1; 6131 btprot.type = 1; 6132 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6133 if (error != 0) 6134 return error; 6135 btprot.open = 0; 6136 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6137 } 6138 6139 static int 6140 iwn5000_runtime_calib(struct iwn_softc *sc) 6141 { 6142 struct iwn5000_calib_config cmd; 6143 6144 memset(&cmd, 0, sizeof cmd); 6145 cmd.ucode.once.enable = 0xffffffff; 6146 cmd.ucode.once.start = IWN5000_CALIB_DC; 6147 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 6148 "%s: configuring runtime calibration\n", __func__); 6149 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 6150 } 6151 6152 static int 6153 iwn_config(struct iwn_softc *sc) 6154 { 6155 struct iwn_ops *ops = &sc->ops; 6156 struct ifnet *ifp = sc->sc_ifp; 6157 struct ieee80211com *ic = ifp->if_l2com; 6158 uint32_t txmask; 6159 uint16_t rxchain; 6160 int error; 6161 6162 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6163 6164 if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) 6165 && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) { 6166 device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are" 6167 " exclusive each together. Review NIC config file. 
Conf" 6168 " : 0x%08x Flags : 0x%08x \n", __func__, 6169 sc->base_params->calib_need, 6170 (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET | 6171 IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)); 6172 return (EINVAL); 6173 } 6174 6175 /* Compute temperature calib if needed. Will be send by send calib */ 6176 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) { 6177 error = iwn5000_temp_offset_calib(sc); 6178 if (error != 0) { 6179 device_printf(sc->sc_dev, 6180 "%s: could not set temperature offset\n", __func__); 6181 return (error); 6182 } 6183 } else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { 6184 error = iwn5000_temp_offset_calibv2(sc); 6185 if (error != 0) { 6186 device_printf(sc->sc_dev, 6187 "%s: could not compute temperature offset v2\n", 6188 __func__); 6189 return (error); 6190 } 6191 } 6192 6193 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 6194 /* Configure runtime DC calibration. */ 6195 error = iwn5000_runtime_calib(sc); 6196 if (error != 0) { 6197 device_printf(sc->sc_dev, 6198 "%s: could not configure runtime calibration\n", 6199 __func__); 6200 return error; 6201 } 6202 } 6203 6204 /* Configure valid TX chains for >=5000 Series. */ 6205 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 6206 txmask = htole32(sc->txchainmask); 6207 DPRINTF(sc, IWN_DEBUG_RESET, 6208 "%s: configuring valid TX chains 0x%x\n", __func__, txmask); 6209 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 6210 sizeof txmask, 0); 6211 if (error != 0) { 6212 device_printf(sc->sc_dev, 6213 "%s: could not configure valid TX chains, " 6214 "error %d\n", __func__, error); 6215 return error; 6216 } 6217 } 6218 6219 /* Configure bluetooth coexistence. */ 6220 error = 0; 6221 6222 /* Configure bluetooth coexistence if needed. */ 6223 if (sc->base_params->bt_mode == IWN_BT_ADVANCED) 6224 error = iwn_send_advanced_btcoex(sc); 6225 if (sc->base_params->bt_mode == IWN_BT_SIMPLE) 6226 error = iwn_send_btcoex(sc); 6227 6228 if (error != 0) { 6229 device_printf(sc->sc_dev, 6230 "%s: could not configure bluetooth coexistence, error %d\n", 6231 __func__, error); 6232 return error; 6233 } 6234 6235 /* Set mode, channel, RX filter and enable RX. */ 6236 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6237 memset(sc->rxon, 0, sizeof (struct iwn_rxon)); 6238 IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp)); 6239 IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp)); 6240 sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 6241 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6242 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 6243 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6244 switch (ic->ic_opmode) { 6245 case IEEE80211_M_STA: 6246 sc->rxon->mode = IWN_MODE_STA; 6247 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST); 6248 break; 6249 case IEEE80211_M_MONITOR: 6250 sc->rxon->mode = IWN_MODE_MONITOR; 6251 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST | 6252 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 6253 break; 6254 default: 6255 /* Should not get there. 
*/ 6256 break; 6257 } 6258 sc->rxon->cck_mask = 0x0f; /* not yet negotiated */ 6259 sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */ 6260 sc->rxon->ht_single_mask = 0xff; 6261 sc->rxon->ht_dual_mask = 0xff; 6262 sc->rxon->ht_triple_mask = 0xff; 6263 rxchain = 6264 IWN_RXCHAIN_VALID(sc->rxchainmask) | 6265 IWN_RXCHAIN_MIMO_COUNT(2) | 6266 IWN_RXCHAIN_IDLE_COUNT(2); 6267 sc->rxon->rxchain = htole16(rxchain); 6268 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); 6269 if (sc->sc_is_scanning) 6270 device_printf(sc->sc_dev, 6271 "%s: is_scanning set, before RXON\n", 6272 __func__); 6273 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0); 6274 if (error != 0) { 6275 device_printf(sc->sc_dev, "%s: RXON command failed\n", 6276 __func__); 6277 return error; 6278 } 6279 6280 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 6281 device_printf(sc->sc_dev, "%s: could not add broadcast node\n", 6282 __func__); 6283 return error; 6284 } 6285 6286 /* Configuration has changed, set TX power accordingly. */ 6287 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) { 6288 device_printf(sc->sc_dev, "%s: could not set TX power\n", 6289 __func__); 6290 return error; 6291 } 6292 6293 if ((error = iwn_set_critical_temp(sc)) != 0) { 6294 device_printf(sc->sc_dev, 6295 "%s: could not set critical temperature\n", __func__); 6296 return error; 6297 } 6298 6299 /* Set power saving level to CAM during initialization. */ 6300 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 6301 device_printf(sc->sc_dev, 6302 "%s: could not set power saving level\n", __func__); 6303 return error; 6304 } 6305 6306 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6307 6308 return 0; 6309 } 6310 6311 /* 6312 * Add an ssid element to a frame. 6313 */ 6314 static uint8_t * 6315 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) 6316 { 6317 *frm++ = IEEE80211_ELEMID_SSID; 6318 *frm++ = len; 6319 memcpy(frm, ssid, len); 6320 return frm + len; 6321 } 6322 6323 static uint16_t 6324 iwn_get_active_dwell_time(struct iwn_softc *sc, 6325 struct ieee80211_channel *c, uint8_t n_probes) 6326 { 6327 /* No channel? Default to 2GHz settings */ 6328 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 6329 return (IWN_ACTIVE_DWELL_TIME_2GHZ + 6330 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 6331 } 6332 6333 /* 5GHz dwell time */ 6334 return (IWN_ACTIVE_DWELL_TIME_5GHZ + 6335 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 6336 } 6337 6338 /* 6339 * Limit the total dwell time to 85% of the beacon interval. 6340 * 6341 * Returns the dwell time in milliseconds. 6342 */ 6343 static uint16_t 6344 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 6345 { 6346 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 6347 struct ieee80211vap *vap = NULL; 6348 int bintval = 0; 6349 6350 /* bintval is in TU (1.024mS) */ 6351 if (! TAILQ_EMPTY(&ic->ic_vaps)) { 6352 vap = TAILQ_FIRST(&ic->ic_vaps); 6353 bintval = vap->iv_bss->ni_intval; 6354 } 6355 6356 /* 6357 * If it's non-zero, we should calculate the minimum of 6358 * it and the DWELL_BASE. 6359 * 6360 * XXX Yes, the math should take into account that bintval 6361 * is 1.024mS, not 1mS.. 6362 */ 6363 if (bintval > 0) { 6364 DPRINTF(sc, IWN_DEBUG_SCAN, 6365 "%s: bintval=%d\n", 6366 __func__, 6367 bintval); 6368 return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); 6369 } 6370 6371 /* No association context? 
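The clamp applied above keeps a passive dwell from consuming the whole
beacon period: at most 85% of the advertised beacon interval, and never
more than IWN_PASSIVE_DWELL_BASE (the XXX is right that bintval is in TU,
i.e. 1.024 ms units, while the math treats it as milliseconds).  A
standalone restatement; the constant is a stand-in, not necessarily the
driver's value:

#include <stdint.h>

#define PASSIVE_DWELL_BASE_MS	100	// stand-in for IWN_PASSIVE_DWELL_BASE

static uint16_t
limit_dwell_ms(uint16_t bintval_tu)
{
	uint32_t cap;

	if (bintval_tu == 0)		// no association context: use the base
		return PASSIVE_DWELL_BASE_MS;
	cap = ((uint32_t)bintval_tu * 85) / 100;
	return (uint16_t)(cap < PASSIVE_DWELL_BASE_MS ?
	    cap : PASSIVE_DWELL_BASE_MS);
}

Without a VAP to supply a beacon interval, the code below likewise falls
back to the base value.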
Default */ 6372 return (IWN_PASSIVE_DWELL_BASE); 6373 } 6374 6375 static uint16_t 6376 iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c) 6377 { 6378 uint16_t passive; 6379 6380 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 6381 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 6382 } else { 6383 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 6384 } 6385 6386 /* Clamp to the beacon interval if we're associated */ 6387 return (iwn_limit_dwell(sc, passive)); 6388 } 6389 6390 static int 6391 iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap, 6392 struct ieee80211_scan_state *ss, struct ieee80211_channel *c) 6393 { 6394 struct ifnet *ifp = sc->sc_ifp; 6395 struct ieee80211com *ic = ifp->if_l2com; 6396 struct ieee80211_node *ni = vap->iv_bss; 6397 struct iwn_scan_hdr *hdr; 6398 struct iwn_cmd_data *tx; 6399 struct iwn_scan_essid *essid; 6400 struct iwn_scan_chan *chan; 6401 struct ieee80211_frame *wh; 6402 struct ieee80211_rateset *rs; 6403 uint8_t *buf, *frm; 6404 uint16_t rxchain; 6405 uint8_t txant; 6406 int buflen, error; 6407 int is_active; 6408 uint16_t dwell_active, dwell_passive; 6409 uint32_t extra, scan_service_time; 6410 6411 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6412 6413 /* 6414 * We are absolutely not allowed to send a scan command when another 6415 * scan command is pending. 6416 */ 6417 if (sc->sc_is_scanning) { 6418 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 6419 __func__); 6420 return (EAGAIN); 6421 } 6422 6423 /* Assign the scan channel */ 6424 c = ic->ic_curchan; 6425 6426 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6427 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 6428 if (buf == NULL) { 6429 device_printf(sc->sc_dev, 6430 "%s: could not allocate buffer for scan command\n", 6431 __func__); 6432 return ENOMEM; 6433 } 6434 hdr = (struct iwn_scan_hdr *)buf; 6435 /* 6436 * Move to the next channel if no frames are received within 10ms 6437 * after sending the probe request. 6438 */ 6439 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 6440 hdr->quiet_threshold = htole16(1); /* min # of packets */ 6441 /* 6442 * Max needs to be greater than active and passive and quiet! 6443 * It's also in microseconds! 6444 */ 6445 hdr->max_svc = htole32(250 * 1024); 6446 6447 /* 6448 * Reset scan: interval=100 6449 * Normal scan: interval=becaon interval 6450 * suspend_time: 100 (TU) 6451 * 6452 */ 6453 extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22; 6454 //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024); 6455 scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */ 6456 hdr->pause_svc = htole32(scan_service_time); 6457 6458 /* Select antennas for scanning. */ 6459 rxchain = 6460 IWN_RXCHAIN_VALID(sc->rxchainmask) | 6461 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 6462 IWN_RXCHAIN_DRIVER_FORCE; 6463 if (IEEE80211_IS_CHAN_A(c) && 6464 sc->hw_type == IWN_HW_REV_TYPE_4965) { 6465 /* Ant A must be avoided in 5GHz because of an HW bug. */ 6466 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); 6467 } else /* Use all available RX antennas. 
*/ 6468 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 6469 hdr->rxchain = htole16(rxchain); 6470 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 6471 6472 tx = (struct iwn_cmd_data *)(hdr + 1); 6473 tx->flags = htole32(IWN_TX_AUTO_SEQ); 6474 tx->id = sc->broadcast_id; 6475 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 6476 6477 if (IEEE80211_IS_CHAN_5GHZ(c)) { 6478 /* Send probe requests at 6Mbps. */ 6479 tx->rate = htole32(0xd); 6480 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 6481 } else { 6482 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 6483 if (sc->hw_type == IWN_HW_REV_TYPE_4965 && 6484 sc->rxon->associd && sc->rxon->chan > 14) 6485 tx->rate = htole32(0xd); 6486 else { 6487 /* Send probe requests at 1Mbps. */ 6488 tx->rate = htole32(10 | IWN_RFLAG_CCK); 6489 } 6490 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 6491 } 6492 /* Use the first valid TX antenna. */ 6493 txant = IWN_LSB(sc->txchainmask); 6494 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 6495 6496 /* 6497 * Only do active scanning if we're announcing a probe request 6498 * for a given SSID (or more, if we ever add it to the driver.) 6499 */ 6500 is_active = 0; 6501 6502 /* 6503 * If we're scanning for a specific SSID, add it to the command. 6504 * 6505 * XXX maybe look at adding support for scanning multiple SSIDs? 6506 */ 6507 essid = (struct iwn_scan_essid *)(tx + 1); 6508 if (ss != NULL) { 6509 if (ss->ss_ssid[0].len != 0) { 6510 essid[0].id = IEEE80211_ELEMID_SSID; 6511 essid[0].len = ss->ss_ssid[0].len; 6512 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 6513 } 6514 6515 DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n", 6516 __func__, 6517 ss->ss_ssid[0].len, 6518 ss->ss_ssid[0].len, 6519 ss->ss_ssid[0].ssid); 6520 6521 if (ss->ss_nssid > 0) 6522 is_active = 1; 6523 } 6524 6525 /* 6526 * Build a probe request frame. Most of the following code is a 6527 * copy & paste of what is done in net80211. 6528 */ 6529 wh = (struct ieee80211_frame *)(essid + 20); 6530 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 6531 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 6532 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 6533 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 6534 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 6535 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 6536 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 6537 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 6538 6539 frm = (uint8_t *)(wh + 1); 6540 frm = ieee80211_add_ssid(frm, NULL, 0); 6541 frm = ieee80211_add_rates(frm, rs); 6542 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6543 frm = ieee80211_add_xrates(frm, rs); 6544 if (ic->ic_htcaps & IEEE80211_HTC_HT) 6545 frm = ieee80211_add_htcap(frm, ni); 6546 6547 /* Set length of probe request. */ 6548 tx->len = htole16(frm - (uint8_t *)wh); 6549 6550 /* 6551 * If active scanning is requested but a certain channel is 6552 * marked passive, we can do active scanning if we detect 6553 * transmissions. 6554 * 6555 * There is an issue with some firmware versions that triggers 6556 * a sysassert on a "good CRC threshold" of zero (== disabled), 6557 * on a radar channel even though this means that we should NOT 6558 * send probes. 6559 * 6560 * The "good CRC threshold" is the number of frames that we 6561 * need to receive during our dwell time on a channel before 6562 * sending out probes -- setting this to a huge value will 6563 * mean we never reach it, but at the same time work around 6564 * the aforementioned issue. 
Thus use IWL_GOOD_CRC_TH_NEVER 6565 * here instead of IWL_GOOD_CRC_TH_DISABLED. 6566 * 6567 * This was fixed in later versions along with some other 6568 * scan changes, and the threshold behaves as a flag in those 6569 * versions. 6570 */ 6571 6572 /* 6573 * If we're doing active scanning, set the crc_threshold 6574 * to a suitable value. This is different to active veruss 6575 * passive scanning depending upon the channel flags; the 6576 * firmware will obey that particular check for us. 6577 */ 6578 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 6579 hdr->crc_threshold = is_active ? 6580 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 6581 else 6582 hdr->crc_threshold = is_active ? 6583 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 6584 6585 chan = (struct iwn_scan_chan *)frm; 6586 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 6587 chan->flags = 0; 6588 if (ss->ss_nssid > 0) 6589 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 6590 chan->dsp_gain = 0x6e; 6591 6592 /* 6593 * Set the passive/active flag depending upon the channel mode. 6594 * XXX TODO: take the is_active flag into account as well? 6595 */ 6596 if (c->ic_flags & IEEE80211_CHAN_PASSIVE) 6597 chan->flags |= htole32(IWN_CHAN_PASSIVE); 6598 else 6599 chan->flags |= htole32(IWN_CHAN_ACTIVE); 6600 6601 /* 6602 * Calculate the active/passive dwell times. 6603 */ 6604 6605 dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid); 6606 dwell_passive = iwn_get_passive_dwell_time(sc, c); 6607 6608 /* Make sure they're valid */ 6609 if (dwell_passive <= dwell_active) 6610 dwell_passive = dwell_active + 1; 6611 6612 chan->active = htole16(dwell_active); 6613 chan->passive = htole16(dwell_passive); 6614 6615 if (IEEE80211_IS_CHAN_5GHZ(c) && 6616 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6617 chan->rf_gain = 0x3b; 6618 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 6619 chan->rf_gain = 0x3b; 6620 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6621 chan->rf_gain = 0x28; 6622 } else { 6623 chan->rf_gain = 0x28; 6624 } 6625 6626 DPRINTF(sc, IWN_DEBUG_STATE, 6627 "%s: chan %u flags 0x%x rf_gain 0x%x " 6628 "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x " 6629 "isactive=%d numssid=%d\n", __func__, 6630 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 6631 dwell_active, dwell_passive, scan_service_time, 6632 hdr->crc_threshold, is_active, ss->ss_nssid); 6633 6634 hdr->nchan++; 6635 chan++; 6636 buflen = (uint8_t *)chan - buf; 6637 hdr->len = htole16(buflen); 6638 6639 if (sc->sc_is_scanning) { 6640 device_printf(sc->sc_dev, 6641 "%s: called with is_scanning set!\n", 6642 __func__); 6643 } 6644 sc->sc_is_scanning = 1; 6645 6646 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 6647 hdr->nchan); 6648 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 6649 free(buf, M_DEVBUF); 6650 6651 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6652 6653 return error; 6654 } 6655 6656 static int 6657 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 6658 { 6659 struct iwn_ops *ops = &sc->ops; 6660 struct ifnet *ifp = sc->sc_ifp; 6661 struct ieee80211com *ic = ifp->if_l2com; 6662 struct ieee80211_node *ni = vap->iv_bss; 6663 int error; 6664 6665 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6666 6667 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6668 /* Update adapter configuration. 
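A note on the rate masks set a few lines below: cck_mask and ofdm_mask are
bitmaps over the PHY rate tables, one bit per rate in ascending order.
Assuming the usual ordering (CCK 1/2/5.5/11 Mb/s, OFDM 6/9/12/18/24/36/48/54
Mb/s -- an assumption, the tables are defined elsewhere), 0x03 enables
1 and 2 Mb/s, 0x0f all four CCK rates, and 0x15 the 6/12/24 Mb/s OFDM set.
A sketch of building such a mask under that assumed ordering:

#include <stdint.h>

static const int ofdm_rate_kbps[8] =
    { 6000, 9000, 12000, 18000, 24000, 36000, 48000, 54000 };

static uint8_t
ofdm_mask_for(const int *want_kbps, int nwant)
{
	uint8_t mask = 0;
	int i, j;

	for (i = 0; i < nwant; i++)
		for (j = 0; j < 8; j++)
			if (want_kbps[i] == ofdm_rate_kbps[j])
				mask |= 1 << j;
	return mask;
}

For example, ofdm_mask_for((const int[]){ 6000, 12000, 24000 }, 3) yields 0x15.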
*/ 6669 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); 6670 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); 6671 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6672 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 6673 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6674 if (ic->ic_flags & IEEE80211_F_SHSLOT) 6675 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); 6676 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6677 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); 6678 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 6679 sc->rxon->cck_mask = 0; 6680 sc->rxon->ofdm_mask = 0x15; 6681 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 6682 sc->rxon->cck_mask = 0x03; 6683 sc->rxon->ofdm_mask = 0; 6684 } else { 6685 /* Assume 802.11b/g. */ 6686 sc->rxon->cck_mask = 0x03; 6687 sc->rxon->ofdm_mask = 0x15; 6688 } 6689 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 6690 sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask, 6691 sc->rxon->ofdm_mask); 6692 if (sc->sc_is_scanning) 6693 device_printf(sc->sc_dev, 6694 "%s: is_scanning set, before RXON\n", 6695 __func__); 6696 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 6697 if (error != 0) { 6698 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n", 6699 __func__, error); 6700 return error; 6701 } 6702 6703 /* Configuration has changed, set TX power accordingly. */ 6704 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 6705 device_printf(sc->sc_dev, 6706 "%s: could not set TX power, error %d\n", __func__, error); 6707 return error; 6708 } 6709 /* 6710 * Reconfiguring RXON clears the firmware nodes table so we must 6711 * add the broadcast node again. 6712 */ 6713 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 6714 device_printf(sc->sc_dev, 6715 "%s: could not add broadcast node, error %d\n", __func__, 6716 error); 6717 return error; 6718 } 6719 6720 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6721 6722 return 0; 6723 } 6724 6725 static int 6726 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 6727 { 6728 struct iwn_ops *ops = &sc->ops; 6729 struct ifnet *ifp = sc->sc_ifp; 6730 struct ieee80211com *ic = ifp->if_l2com; 6731 struct ieee80211_node *ni = vap->iv_bss; 6732 struct iwn_node_info node; 6733 uint32_t htflags = 0; 6734 int error; 6735 6736 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6737 6738 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6739 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6740 /* Link LED blinks while monitoring. */ 6741 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 6742 return 0; 6743 } 6744 if ((error = iwn_set_timing(sc, ni)) != 0) { 6745 device_printf(sc->sc_dev, 6746 "%s: could not set timing, error %d\n", __func__, error); 6747 return error; 6748 } 6749 6750 /* Update adapter configuration. 
*/ 6751 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); 6752 sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd)); 6753 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); 6754 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6755 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 6756 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6757 if (ic->ic_flags & IEEE80211_F_SHSLOT) 6758 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); 6759 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6760 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); 6761 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 6762 sc->rxon->cck_mask = 0; 6763 sc->rxon->ofdm_mask = 0x15; 6764 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 6765 sc->rxon->cck_mask = 0x03; 6766 sc->rxon->ofdm_mask = 0; 6767 } else { 6768 /* Assume 802.11b/g. */ 6769 sc->rxon->cck_mask = 0x0f; 6770 sc->rxon->ofdm_mask = 0x15; 6771 } 6772 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 6773 htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode); 6774 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 6775 switch (ic->ic_curhtprotmode) { 6776 case IEEE80211_HTINFO_OPMODE_HT20PR: 6777 htflags |= IWN_RXON_HT_MODEPURE40; 6778 break; 6779 default: 6780 htflags |= IWN_RXON_HT_MODEMIXED; 6781 break; 6782 } 6783 } 6784 if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 6785 htflags |= IWN_RXON_HT_HT40MINUS; 6786 } 6787 sc->rxon->flags |= htole32(htflags); 6788 sc->rxon->filter |= htole32(IWN_FILTER_BSS); 6789 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n", 6790 sc->rxon->chan, sc->rxon->flags); 6791 if (sc->sc_is_scanning) 6792 device_printf(sc->sc_dev, 6793 "%s: is_scanning set, before RXON\n", 6794 __func__); 6795 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 6796 if (error != 0) { 6797 device_printf(sc->sc_dev, 6798 "%s: could not update configuration, error %d\n", __func__, 6799 error); 6800 return error; 6801 } 6802 6803 /* Configuration has changed, set TX power accordingly. */ 6804 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 6805 device_printf(sc->sc_dev, 6806 "%s: could not set TX power, error %d\n", __func__, error); 6807 return error; 6808 } 6809 6810 /* Fake a join to initialize the TX rate. */ 6811 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 6812 iwn_newassoc(ni, 1); 6813 6814 /* Add BSS node. 
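For reference on the HT flags set just below: the A-MPDU parameters follow
the 802.11n encoding, where a size factor f selects a maximum aggregate of
2^(13 + f) - 1 bytes (factor 3 = 65535 bytes) and density code 5 is the
4 us minimum MPDU start spacing, matching the inline comment there.  A tiny
restatement of that encoding (illustrative, not taken from this driver):

#include <stdint.h>

static uint32_t
ampdu_max_len_bytes(unsigned factor)		// factor 0..3
{
	return (1u << (13 + factor)) - 1;
}

static unsigned
ampdu_density_quarter_us(unsigned code)		// code 0..7
{
	// 0 = no restriction; otherwise 2^(code - 1) quarter-microseconds,
	// so code 5 = 16 quarter-us = 4 us.
	return code == 0 ? 0 : 1u << (code - 1);
}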
*/ 6815 memset(&node, 0, sizeof node); 6816 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 6817 node.id = IWN_ID_BSS; 6818 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 6819 switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) { 6820 case IEEE80211_HTCAP_SMPS_ENA: 6821 node.htflags |= htole32(IWN_SMPS_MIMO_DIS); 6822 break; 6823 case IEEE80211_HTCAP_SMPS_DYNAMIC: 6824 node.htflags |= htole32(IWN_SMPS_MIMO_PROT); 6825 break; 6826 } 6827 node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) | 6828 IWN_AMDPU_DENSITY(5)); /* 4us */ 6829 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) 6830 node.htflags |= htole32(IWN_NODE_HT40); 6831 } 6832 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__); 6833 error = ops->add_node(sc, &node, 1); 6834 if (error != 0) { 6835 device_printf(sc->sc_dev, 6836 "%s: could not add BSS node, error %d\n", __func__, error); 6837 return error; 6838 } 6839 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n", 6840 __func__, node.id); 6841 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 6842 device_printf(sc->sc_dev, 6843 "%s: could not setup link quality for node %d, error %d\n", 6844 __func__, node.id, error); 6845 return error; 6846 } 6847 6848 if ((error = iwn_init_sensitivity(sc)) != 0) { 6849 device_printf(sc->sc_dev, 6850 "%s: could not set sensitivity, error %d\n", __func__, 6851 error); 6852 return error; 6853 } 6854 /* Start periodic calibration timer. */ 6855 sc->calib.state = IWN_CALIB_STATE_ASSOC; 6856 sc->calib_cnt = 0; 6857 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 6858 sc); 6859 6860 /* Link LED always on while associated. */ 6861 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 6862 6863 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6864 6865 return 0; 6866 } 6867 6868 /* 6869 * This function is called by upper layer when an ADDBA request is received 6870 * from another STA and before the ADDBA response is sent. 6871 */ 6872 static int 6873 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 6874 int baparamset, int batimeout, int baseqctl) 6875 { 6876 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 6877 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6878 struct iwn_ops *ops = &sc->ops; 6879 struct iwn_node *wn = (void *)ni; 6880 struct iwn_node_info node; 6881 uint16_t ssn; 6882 uint8_t tid; 6883 int error; 6884 6885 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6886 6887 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 6888 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 6889 6890 memset(&node, 0, sizeof node); 6891 node.id = wn->id; 6892 node.control = IWN_NODE_UPDATE; 6893 node.flags = IWN_FLAG_SET_ADDBA; 6894 node.addba_tid = tid; 6895 node.addba_ssn = htole16(ssn); 6896 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 6897 wn->id, tid, ssn); 6898 error = ops->add_node(sc, &node, 1); 6899 if (error != 0) 6900 return error; 6901 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 6902 #undef MS 6903 } 6904 6905 /* 6906 * This function is called by upper layer on teardown of an HT-immediate 6907 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
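The teardown handler below has no TID argument (hence the XXX), so it
recovers the TID by matching the ieee80211_rx_ampdu pointer against the
node's ni_rx_ampdu[] array.  Since rap always points into that array, the
same result can be had with pointer arithmetic; a short sketch of the
equivalent lookup, assuming only net80211's ni_rx_ampdu[] layout:

static uint8_t
rx_ampdu_tid(const struct ieee80211_node *ni,
    const struct ieee80211_rx_ampdu *rap)
{
	return (uint8_t)(rap - ni->ni_rx_ampdu);	// array index == TID
}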
6908 */ 6909 static void 6910 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 6911 { 6912 struct ieee80211com *ic = ni->ni_ic; 6913 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6914 struct iwn_ops *ops = &sc->ops; 6915 struct iwn_node *wn = (void *)ni; 6916 struct iwn_node_info node; 6917 uint8_t tid; 6918 6919 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6920 6921 /* XXX: tid as an argument */ 6922 for (tid = 0; tid < WME_NUM_TID; tid++) { 6923 if (&ni->ni_rx_ampdu[tid] == rap) 6924 break; 6925 } 6926 6927 memset(&node, 0, sizeof node); 6928 node.id = wn->id; 6929 node.control = IWN_NODE_UPDATE; 6930 node.flags = IWN_FLAG_SET_DELBA; 6931 node.delba_tid = tid; 6932 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 6933 (void)ops->add_node(sc, &node, 1); 6934 sc->sc_ampdu_rx_stop(ni, rap); 6935 } 6936 6937 static int 6938 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6939 int dialogtoken, int baparamset, int batimeout) 6940 { 6941 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6942 int qid; 6943 6944 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6945 6946 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 6947 if (sc->qid2tap[qid] == NULL) 6948 break; 6949 } 6950 if (qid == sc->ntxqs) { 6951 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 6952 __func__); 6953 return 0; 6954 } 6955 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 6956 if (tap->txa_private == NULL) { 6957 device_printf(sc->sc_dev, 6958 "%s: failed to alloc TX aggregation structure\n", __func__); 6959 return 0; 6960 } 6961 sc->qid2tap[qid] = tap; 6962 *(int *)tap->txa_private = qid; 6963 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 6964 batimeout); 6965 } 6966 6967 static int 6968 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6969 int code, int baparamset, int batimeout) 6970 { 6971 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6972 int qid = *(int *)tap->txa_private; 6973 uint8_t tid = tap->txa_tid; 6974 int ret; 6975 6976 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6977 6978 if (code == IEEE80211_STATUS_SUCCESS) { 6979 ni->ni_txseqs[tid] = tap->txa_start & 0xfff; 6980 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); 6981 if (ret != 1) 6982 return ret; 6983 } else { 6984 sc->qid2tap[qid] = NULL; 6985 free(tap->txa_private, M_DEVBUF); 6986 tap->txa_private = NULL; 6987 } 6988 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); 6989 } 6990 6991 /* 6992 * This function is called by upper layer when an ADDBA response is received 6993 * from another STA. 6994 */ 6995 static int 6996 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 6997 uint8_t tid) 6998 { 6999 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; 7000 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 7001 struct iwn_ops *ops = &sc->ops; 7002 struct iwn_node *wn = (void *)ni; 7003 struct iwn_node_info node; 7004 int error, qid; 7005 7006 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7007 7008 /* Enable TX for the specified RA/TID. 
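A note on the sequence-number handling below: the ADDBA starting sequence
is a 12-bit 802.11 value (hence txa_start & 0xfff), while the TX rings the
scheduler indexes hold 256 slots, so the ring read/write pointers use only
the low 8 bits (ssn & 0xff).  A small restatement of that mapping; the ring
size here is an assumption for illustration:

#include <stdint.h>

#define TX_RING_SLOTS	256		// assumed ring size behind the 0xff mask

static uint16_t
addba_ssn(uint16_t txa_start)
{
	return txa_start & 0xfff;	// 12-bit 802.11 sequence number
}

static uint8_t
ring_index_for_ssn(uint16_t ssn)
{
	return (uint8_t)(ssn % TX_RING_SLOTS);	// equivalent to ssn & 0xff
}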
*/ 7009 wn->disable_tid &= ~(1 << tid); 7010 memset(&node, 0, sizeof node); 7011 node.id = wn->id; 7012 node.control = IWN_NODE_UPDATE; 7013 node.flags = IWN_FLAG_SET_DISABLE_TID; 7014 node.disable_tid = htole16(wn->disable_tid); 7015 error = ops->add_node(sc, &node, 1); 7016 if (error != 0) 7017 return 0; 7018 7019 if ((error = iwn_nic_lock(sc)) != 0) 7020 return 0; 7021 qid = *(int *)tap->txa_private; 7022 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n", 7023 __func__, wn->id, tid, tap->txa_start, qid); 7024 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff); 7025 iwn_nic_unlock(sc); 7026 7027 iwn_set_link_quality(sc, ni); 7028 return 1; 7029 } 7030 7031 static void 7032 iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 7033 { 7034 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 7035 struct iwn_ops *ops = &sc->ops; 7036 uint8_t tid = tap->txa_tid; 7037 int qid; 7038 7039 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7040 7041 sc->sc_addba_stop(ni, tap); 7042 7043 if (tap->txa_private == NULL) 7044 return; 7045 7046 qid = *(int *)tap->txa_private; 7047 if (sc->txq[qid].queued != 0) 7048 return; 7049 if (iwn_nic_lock(sc) != 0) 7050 return; 7051 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff); 7052 iwn_nic_unlock(sc); 7053 sc->qid2tap[qid] = NULL; 7054 free(tap->txa_private, M_DEVBUF); 7055 tap->txa_private = NULL; 7056 } 7057 7058 static void 7059 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 7060 int qid, uint8_t tid, uint16_t ssn) 7061 { 7062 struct iwn_node *wn = (void *)ni; 7063 7064 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7065 7066 /* Stop TX scheduler while we're changing its configuration. */ 7067 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7068 IWN4965_TXQ_STATUS_CHGACT); 7069 7070 /* Assign RA/TID translation to the queue. */ 7071 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 7072 wn->id << 4 | tid); 7073 7074 /* Enable chain-building mode for the queue. */ 7075 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 7076 7077 /* Set starting sequence number from the ADDBA request. */ 7078 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 7079 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7080 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 7081 7082 /* Set scheduler window size. */ 7083 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 7084 IWN_SCHED_WINSZ); 7085 /* Set scheduler frame limit. */ 7086 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 7087 IWN_SCHED_LIMIT << 16); 7088 7089 /* Enable interrupts for the queue. */ 7090 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 7091 7092 /* Mark the queue as active. */ 7093 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7094 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 7095 iwn_tid2fifo[tid] << 1); 7096 } 7097 7098 static void 7099 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 7100 { 7101 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7102 7103 /* Stop TX scheduler while we're changing its configuration. */ 7104 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7105 IWN4965_TXQ_STATUS_CHGACT); 7106 7107 /* Set starting sequence number from the ADDBA request. */ 7108 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7109 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 7110 7111 /* Disable interrupts for the queue. 
*/ 7112 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 7113 7114 /* Mark the queue as inactive. */ 7115 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7116 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 7117 } 7118 7119 static void 7120 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 7121 int qid, uint8_t tid, uint16_t ssn) 7122 { 7123 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7124 7125 struct iwn_node *wn = (void *)ni; 7126 7127 /* Stop TX scheduler while we're changing its configuration. */ 7128 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7129 IWN5000_TXQ_STATUS_CHGACT); 7130 7131 /* Assign RA/TID translation to the queue. */ 7132 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 7133 wn->id << 4 | tid); 7134 7135 /* Enable chain-building mode for the queue. */ 7136 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 7137 7138 /* Enable aggregation for the queue. */ 7139 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 7140 7141 /* Set starting sequence number from the ADDBA request. */ 7142 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 7143 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7144 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 7145 7146 /* Set scheduler window size and frame limit. */ 7147 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7148 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7149 7150 /* Enable interrupts for the queue. */ 7151 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 7152 7153 /* Mark the queue as active. */ 7154 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7155 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 7156 } 7157 7158 static void 7159 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 7160 { 7161 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7162 7163 /* Stop TX scheduler while we're changing its configuration. */ 7164 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7165 IWN5000_TXQ_STATUS_CHGACT); 7166 7167 /* Disable aggregation for the queue. */ 7168 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 7169 7170 /* Set starting sequence number from the ADDBA request. */ 7171 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7172 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 7173 7174 /* Disable interrupts for the queue. */ 7175 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 7176 7177 /* Mark the queue as inactive. */ 7178 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7179 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 7180 } 7181 7182 /* 7183 * Query calibration tables from the initialization firmware. We do this 7184 * only once at first boot. Called from a process context. 7185 */ 7186 static int 7187 iwn5000_query_calibration(struct iwn_softc *sc) 7188 { 7189 struct iwn5000_calib_config cmd; 7190 int error; 7191 7192 memset(&cmd, 0, sizeof cmd); 7193 cmd.ucode.once.enable = htole32(0xffffffff); 7194 cmd.ucode.once.start = htole32(0xffffffff); 7195 cmd.ucode.once.send = htole32(0xffffffff); 7196 cmd.ucode.flags = htole32(0xffffffff); 7197 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", 7198 __func__); 7199 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 7200 if (error != 0) 7201 return error; 7202 7203 /* Wait at most two seconds for calibration to complete. 
*/ 7204 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 7205 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz); 7206 return error; 7207 } 7208 7209 /* 7210 * Send calibration results to the runtime firmware. These results were 7211 * obtained on first boot from the initialization firmware. 7212 */ 7213 static int 7214 iwn5000_send_calibration(struct iwn_softc *sc) 7215 { 7216 int idx, error; 7217 7218 for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) { 7219 if (!(sc->base_params->calib_need & (1<<idx))) { 7220 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7221 "No need of calib %d\n", 7222 idx); 7223 continue; /* no need for this calib */ 7224 } 7225 if (sc->calibcmd[idx].buf == NULL) { 7226 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7227 "Need calib idx : %d but no available data\n", 7228 idx); 7229 continue; 7230 } 7231 7232 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7233 "send calibration result idx=%d len=%d\n", idx, 7234 sc->calibcmd[idx].len); 7235 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 7236 sc->calibcmd[idx].len, 0); 7237 if (error != 0) { 7238 device_printf(sc->sc_dev, 7239 "%s: could not send calibration result, error %d\n", 7240 __func__, error); 7241 return error; 7242 } 7243 } 7244 return 0; 7245 } 7246 7247 static int 7248 iwn5000_send_wimax_coex(struct iwn_softc *sc) 7249 { 7250 struct iwn5000_wimax_coex wimax; 7251 7252 #if 0 7253 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 7254 /* Enable WiMAX coexistence for combo adapters. */ 7255 wimax.flags = 7256 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 7257 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 7258 IWN_WIMAX_COEX_STA_TABLE_VALID | 7259 IWN_WIMAX_COEX_ENABLE; 7260 memcpy(wimax.events, iwn6050_wimax_events, 7261 sizeof iwn6050_wimax_events); 7262 } else 7263 #endif 7264 { 7265 /* Disable WiMAX coexistence. 
*/ 7266 wimax.flags = 0; 7267 memset(wimax.events, 0, sizeof wimax.events); 7268 } 7269 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 7270 __func__); 7271 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 7272 } 7273 7274 static int 7275 iwn5000_crystal_calib(struct iwn_softc *sc) 7276 { 7277 struct iwn5000_phy_calib_crystal cmd; 7278 7279 memset(&cmd, 0, sizeof cmd); 7280 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 7281 cmd.ngroups = 1; 7282 cmd.isvalid = 1; 7283 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 7284 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 7285 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", 7286 cmd.cap_pin[0], cmd.cap_pin[1]); 7287 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7288 } 7289 7290 static int 7291 iwn5000_temp_offset_calib(struct iwn_softc *sc) 7292 { 7293 struct iwn5000_phy_calib_temp_offset cmd; 7294 7295 memset(&cmd, 0, sizeof cmd); 7296 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7297 cmd.ngroups = 1; 7298 cmd.isvalid = 1; 7299 if (sc->eeprom_temp != 0) 7300 cmd.offset = htole16(sc->eeprom_temp); 7301 else 7302 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 7303 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n", 7304 le16toh(cmd.offset)); 7305 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7306 } 7307 7308 static int 7309 iwn5000_temp_offset_calibv2(struct iwn_softc *sc) 7310 { 7311 struct iwn5000_phy_calib_temp_offsetv2 cmd; 7312 7313 memset(&cmd, 0, sizeof cmd); 7314 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7315 cmd.ngroups = 1; 7316 cmd.isvalid = 1; 7317 if (sc->eeprom_temp != 0) { 7318 cmd.offset_low = htole16(sc->eeprom_temp); 7319 cmd.offset_high = htole16(sc->eeprom_temp_high); 7320 } else { 7321 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 7322 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 7323 } 7324 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 7325 7326 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7327 "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n", 7328 le16toh(cmd.offset_low), 7329 le16toh(cmd.offset_high), 7330 le16toh(cmd.burnt_voltage_ref)); 7331 7332 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7333 } 7334 7335 /* 7336 * This function is called after the runtime firmware notifies us of its 7337 * readiness (called in a process context). 7338 */ 7339 static int 7340 iwn4965_post_alive(struct iwn_softc *sc) 7341 { 7342 int error, qid; 7343 7344 if ((error = iwn_nic_lock(sc)) != 0) 7345 return error; 7346 7347 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7348 7349 /* Clear TX scheduler state in SRAM. */ 7350 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 7351 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 7352 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 7353 7354 /* Set physical address of TX scheduler rings (1KB aligned). */ 7355 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 7356 7357 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 7358 7359 /* Disable chain mode for all our 16 queues. */ 7360 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 7361 7362 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 7363 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 7364 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 7365 7366 /* Set scheduler window size. 
*/ 7367 iwn_mem_write(sc, sc->sched_base + 7368 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 7369 /* Set scheduler frame limit. */ 7370 iwn_mem_write(sc, sc->sched_base + 7371 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 7372 IWN_SCHED_LIMIT << 16); 7373 } 7374 7375 /* Enable interrupts for all our 16 queues. */ 7376 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 7377 /* Identify TX FIFO rings (0-7). */ 7378 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 7379 7380 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7381 for (qid = 0; qid < 7; qid++) { 7382 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 7383 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7384 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 7385 } 7386 iwn_nic_unlock(sc); 7387 return 0; 7388 } 7389 7390 /* 7391 * This function is called after the initialization or runtime firmware 7392 * notifies us of its readiness (called in a process context). 7393 */ 7394 static int 7395 iwn5000_post_alive(struct iwn_softc *sc) 7396 { 7397 int error, qid; 7398 7399 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 7400 7401 /* Switch to using ICT interrupt mode. */ 7402 iwn5000_ict_reset(sc); 7403 7404 if ((error = iwn_nic_lock(sc)) != 0){ 7405 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 7406 return error; 7407 } 7408 7409 /* Clear TX scheduler state in SRAM. */ 7410 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 7411 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 7412 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 7413 7414 /* Set physical address of TX scheduler rings (1KB aligned). */ 7415 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 7416 7417 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 7418 7419 /* Enable chain mode for all queues, except command queue. */ 7420 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 7421 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf); 7422 else 7423 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 7424 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 7425 7426 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 7427 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 7428 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 7429 7430 iwn_mem_write(sc, sc->sched_base + 7431 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 7432 /* Set scheduler window size and frame limit. */ 7433 iwn_mem_write(sc, sc->sched_base + 7434 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7435 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7436 } 7437 7438 /* Enable interrupts for all our 20 queues. */ 7439 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 7440 /* Identify TX FIFO rings (0-7). */ 7441 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 7442 7443 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7444 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) { 7445 /* Mark TX rings as active. */ 7446 for (qid = 0; qid < 11; qid++) { 7447 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 }; 7448 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7449 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7450 } 7451 } else { 7452 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7453 for (qid = 0; qid < 7; qid++) { 7454 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 7455 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7456 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7457 } 7458 } 7459 iwn_nic_unlock(sc); 7460 7461 /* Configure WiMAX coexistence for combo adapters. 
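A note on the chain-select masks programmed earlier in this function: they
are simply "every TX queue except the command queue" over the 20-queue
bitmap, 0xfffef clearing bit 4 and 0xfffdf clearing bit 5 (the command
queue number differs between the PAN and non-PAN configurations).
Expressed as a computation instead of a constant, with illustrative names:

#include <stdint.h>

static uint32_t
qchain_sel_mask(unsigned nqueues, unsigned cmd_queue)
{
	uint32_t all = (nqueues >= 32) ? 0xffffffffu : (1u << nqueues) - 1;

	return all & ~(1u << cmd_queue);
}

qchain_sel_mask(20, 4) gives 0xfffef and qchain_sel_mask(20, 5) gives 0xfffdf.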
*/ 7462 error = iwn5000_send_wimax_coex(sc); 7463 if (error != 0) { 7464 device_printf(sc->sc_dev, 7465 "%s: could not configure WiMAX coexistence, error %d\n", 7466 __func__, error); 7467 return error; 7468 } 7469 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 7470 /* Perform crystal calibration. */ 7471 error = iwn5000_crystal_calib(sc); 7472 if (error != 0) { 7473 device_printf(sc->sc_dev, 7474 "%s: crystal calibration failed, error %d\n", 7475 __func__, error); 7476 return error; 7477 } 7478 } 7479 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 7480 /* Query calibration from the initialization firmware. */ 7481 if ((error = iwn5000_query_calibration(sc)) != 0) { 7482 device_printf(sc->sc_dev, 7483 "%s: could not query calibration, error %d\n", 7484 __func__, error); 7485 return error; 7486 } 7487 /* 7488 * We have the calibration results now, reboot with the 7489 * runtime firmware (call ourselves recursively!) 7490 */ 7491 iwn_hw_stop(sc); 7492 error = iwn_hw_init(sc); 7493 } else { 7494 /* Send calibration results to runtime firmware. */ 7495 error = iwn5000_send_calibration(sc); 7496 } 7497 7498 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 7499 7500 return error; 7501 } 7502 7503 /* 7504 * The firmware boot code is small and is intended to be copied directly into 7505 * the NIC internal memory (no DMA transfer). 7506 */ 7507 static int 7508 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 7509 { 7510 int error, ntries; 7511 7512 size /= sizeof (uint32_t); 7513 7514 if ((error = iwn_nic_lock(sc)) != 0) 7515 return error; 7516 7517 /* Copy microcode image into NIC memory. */ 7518 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 7519 (const uint32_t *)ucode, size); 7520 7521 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 7522 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 7523 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 7524 7525 /* Start boot load now. */ 7526 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 7527 7528 /* Wait for transfer to complete. */ 7529 for (ntries = 0; ntries < 1000; ntries++) { 7530 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 7531 IWN_BSM_WR_CTRL_START)) 7532 break; 7533 DELAY(10); 7534 } 7535 if (ntries == 1000) { 7536 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 7537 __func__); 7538 iwn_nic_unlock(sc); 7539 return ETIMEDOUT; 7540 } 7541 7542 /* Enable boot after power up. */ 7543 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 7544 7545 iwn_nic_unlock(sc); 7546 return 0; 7547 } 7548 7549 static int 7550 iwn4965_load_firmware(struct iwn_softc *sc) 7551 { 7552 struct iwn_fw_info *fw = &sc->fw; 7553 struct iwn_dma_info *dma = &sc->fw_dma; 7554 int error; 7555 7556 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 7557 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 7558 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7559 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 7560 fw->init.text, fw->init.textsz); 7561 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7562 7563 /* Tell adapter where to find initialization sections. */ 7564 if ((error = iwn_nic_lock(sc)) != 0) 7565 return error; 7566 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 7567 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 7568 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 7569 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 7570 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 7571 iwn_nic_unlock(sc); 7572 7573 /* Load firmware boot code. 
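 * The boot code is a small stub that iwn4965_load_bootcode() pushes
 * through the BSM into NIC SRAM; judging from the registers set up
 * above, the BSM then fetches the init image described by the
 * IWN_BSM_DRAM_* pointers once IWN_RESET is released below.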
*/ 7574 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 7575 if (error != 0) { 7576 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 7577 __func__); 7578 return error; 7579 } 7580 /* Now press "execute". */ 7581 IWN_WRITE(sc, IWN_RESET, 0); 7582 7583 /* Wait at most one second for first alive notification. */ 7584 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { 7585 device_printf(sc->sc_dev, 7586 "%s: timeout waiting for adapter to initialize, error %d\n", 7587 __func__, error); 7588 return error; 7589 } 7590 7591 /* Retrieve current temperature for initial TX power calibration. */ 7592 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 7593 sc->temp = iwn4965_get_temperature(sc); 7594 7595 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 7596 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 7597 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7598 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 7599 fw->main.text, fw->main.textsz); 7600 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7601 7602 /* Tell adapter where to find runtime sections. */ 7603 if ((error = iwn_nic_lock(sc)) != 0) 7604 return error; 7605 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 7606 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 7607 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 7608 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 7609 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 7610 IWN_FW_UPDATED | fw->main.textsz); 7611 iwn_nic_unlock(sc); 7612 7613 return 0; 7614 } 7615 7616 static int 7617 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 7618 const uint8_t *section, int size) 7619 { 7620 struct iwn_dma_info *dma = &sc->fw_dma; 7621 int error; 7622 7623 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7624 7625 /* Copy firmware section into pre-allocated DMA-safe memory. */ 7626 memcpy(dma->vaddr, section, size); 7627 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7628 7629 if ((error = iwn_nic_lock(sc)) != 0) 7630 return error; 7631 7632 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 7633 IWN_FH_TX_CONFIG_DMA_PAUSE); 7634 7635 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 7636 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 7637 IWN_LOADDR(dma->paddr)); 7638 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 7639 IWN_HIADDR(dma->paddr) << 28 | size); 7640 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 7641 IWN_FH_TXBUF_STATUS_TBNUM(1) | 7642 IWN_FH_TXBUF_STATUS_TBIDX(1) | 7643 IWN_FH_TXBUF_STATUS_TFBD_VALID); 7644 7645 /* Kick Flow Handler to start DMA transfer. */ 7646 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 7647 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 7648 7649 iwn_nic_unlock(sc); 7650 7651 /* Wait at most five seconds for FH DMA transfer to complete. */ 7652 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); 7653 } 7654 7655 static int 7656 iwn5000_load_firmware(struct iwn_softc *sc) 7657 { 7658 struct iwn_fw_part *fw; 7659 int error; 7660 7661 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7662 7663 /* Load the initialization firmware on first boot only. */ 7664 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 
7665 &sc->fw.main : &sc->fw.init; 7666 7667 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 7668 fw->text, fw->textsz); 7669 if (error != 0) { 7670 device_printf(sc->sc_dev, 7671 "%s: could not load firmware %s section, error %d\n", 7672 __func__, ".text", error); 7673 return error; 7674 } 7675 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 7676 fw->data, fw->datasz); 7677 if (error != 0) { 7678 device_printf(sc->sc_dev, 7679 "%s: could not load firmware %s section, error %d\n", 7680 __func__, ".data", error); 7681 return error; 7682 } 7683 7684 /* Now press "execute". */ 7685 IWN_WRITE(sc, IWN_RESET, 0); 7686 return 0; 7687 } 7688 7689 /* 7690 * Extract text and data sections from a legacy firmware image. 7691 */ 7692 static int 7693 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 7694 { 7695 const uint32_t *ptr; 7696 size_t hdrlen = 24; 7697 uint32_t rev; 7698 7699 ptr = (const uint32_t *)fw->data; 7700 rev = le32toh(*ptr++); 7701 7702 /* Check firmware API version. */ 7703 if (IWN_FW_API(rev) <= 1) { 7704 device_printf(sc->sc_dev, 7705 "%s: bad firmware, need API version >=2\n", __func__); 7706 return EINVAL; 7707 } 7708 if (IWN_FW_API(rev) >= 3) { 7709 /* Skip build number (version 2 header). */ 7710 hdrlen += 4; 7711 ptr++; 7712 } 7713 if (fw->size < hdrlen) { 7714 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7715 __func__, fw->size); 7716 return EINVAL; 7717 } 7718 fw->main.textsz = le32toh(*ptr++); 7719 fw->main.datasz = le32toh(*ptr++); 7720 fw->init.textsz = le32toh(*ptr++); 7721 fw->init.datasz = le32toh(*ptr++); 7722 fw->boot.textsz = le32toh(*ptr++); 7723 7724 /* Check that all firmware sections fit. */ 7725 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 7726 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 7727 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7728 __func__, fw->size); 7729 return EINVAL; 7730 } 7731 7732 /* Get pointers to firmware sections. */ 7733 fw->main.text = (const uint8_t *)ptr; 7734 fw->main.data = fw->main.text + fw->main.textsz; 7735 fw->init.text = fw->main.data + fw->main.datasz; 7736 fw->init.data = fw->init.text + fw->init.textsz; 7737 fw->boot.text = fw->init.data + fw->init.datasz; 7738 return 0; 7739 } 7740 7741 /* 7742 * Extract text and data sections from a TLV firmware image. 7743 */ 7744 static int 7745 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 7746 uint16_t alt) 7747 { 7748 const struct iwn_fw_tlv_hdr *hdr; 7749 const struct iwn_fw_tlv *tlv; 7750 const uint8_t *ptr, *end; 7751 uint64_t altmask; 7752 uint32_t len, tmp; 7753 7754 if (fw->size < sizeof (*hdr)) { 7755 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7756 __func__, fw->size); 7757 return EINVAL; 7758 } 7759 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 7760 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 7761 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 7762 __func__, le32toh(hdr->signature)); 7763 return EINVAL; 7764 } 7765 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 7766 le32toh(hdr->build)); 7767 7768 /* 7769 * Select the closest supported alternative that is less than 7770 * or equal to the specified one. 7771 */ 7772 altmask = le64toh(hdr->altmask); 7773 while (alt > 0 && !(altmask & (1ULL << alt))) 7774 alt--; /* Downgrade. 
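 * (altmask, read above, is a 64-bit map of the alternatives present
 * in the image; walking downwards picks the highest one that is both
 * present and not above the caller's request.)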
*/ 7775 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 7776 7777 ptr = (const uint8_t *)(hdr + 1); 7778 end = (const uint8_t *)(fw->data + fw->size); 7779 7780 /* Parse type-length-value fields. */ 7781 while (ptr + sizeof (*tlv) <= end) { 7782 tlv = (const struct iwn_fw_tlv *)ptr; 7783 len = le32toh(tlv->len); 7784 7785 ptr += sizeof (*tlv); 7786 if (ptr + len > end) { 7787 device_printf(sc->sc_dev, 7788 "%s: firmware too short: %zu bytes\n", __func__, 7789 fw->size); 7790 return EINVAL; 7791 } 7792 /* Skip other alternatives. */ 7793 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 7794 goto next; 7795 7796 switch (le16toh(tlv->type)) { 7797 case IWN_FW_TLV_MAIN_TEXT: 7798 fw->main.text = ptr; 7799 fw->main.textsz = len; 7800 break; 7801 case IWN_FW_TLV_MAIN_DATA: 7802 fw->main.data = ptr; 7803 fw->main.datasz = len; 7804 break; 7805 case IWN_FW_TLV_INIT_TEXT: 7806 fw->init.text = ptr; 7807 fw->init.textsz = len; 7808 break; 7809 case IWN_FW_TLV_INIT_DATA: 7810 fw->init.data = ptr; 7811 fw->init.datasz = len; 7812 break; 7813 case IWN_FW_TLV_BOOT_TEXT: 7814 fw->boot.text = ptr; 7815 fw->boot.textsz = len; 7816 break; 7817 case IWN_FW_TLV_ENH_SENS: 7818 if (!len) 7819 sc->sc_flags |= IWN_FLAG_ENH_SENS; 7820 break; 7821 case IWN_FW_TLV_PHY_CALIB: 7822 tmp = le32toh(*ptr); 7823 if (tmp < 253) { 7824 sc->reset_noise_gain = tmp; 7825 sc->noise_gain = tmp + 1; 7826 } 7827 break; 7828 case IWN_FW_TLV_PAN: 7829 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; 7830 DPRINTF(sc, IWN_DEBUG_RESET, 7831 "PAN Support found: %d\n", 1); 7832 break; 7833 case IWN_FW_TLV_FLAGS: 7834 if (len < sizeof(uint32_t)) 7835 break; 7836 if (len % sizeof(uint32_t)) 7837 break; 7838 sc->tlv_feature_flags = le32toh(*ptr); 7839 DPRINTF(sc, IWN_DEBUG_RESET, 7840 "%s: feature: 0x%08x\n", 7841 __func__, 7842 sc->tlv_feature_flags); 7843 break; 7844 case IWN_FW_TLV_PBREQ_MAXLEN: 7845 case IWN_FW_TLV_RUNT_EVTLOG_PTR: 7846 case IWN_FW_TLV_RUNT_EVTLOG_SIZE: 7847 case IWN_FW_TLV_RUNT_ERRLOG_PTR: 7848 case IWN_FW_TLV_INIT_EVTLOG_PTR: 7849 case IWN_FW_TLV_INIT_EVTLOG_SIZE: 7850 case IWN_FW_TLV_INIT_ERRLOG_PTR: 7851 case IWN_FW_TLV_WOWLAN_INST: 7852 case IWN_FW_TLV_WOWLAN_DATA: 7853 DPRINTF(sc, IWN_DEBUG_RESET, 7854 "TLV type %d recognized but not handled\n", 7855 le16toh(tlv->type)); 7856 break; 7857 default: 7858 DPRINTF(sc, IWN_DEBUG_RESET, 7859 "TLV type %d not handled\n", le16toh(tlv->type)); 7860 break; 7861 } 7862 next: /* TLV fields are 32-bit aligned. */ 7863 ptr += (len + 3) & ~3; 7864 } 7865 return 0; 7866 } 7867 7868 static int 7869 iwn_read_firmware(struct iwn_softc *sc) 7870 { 7871 struct iwn_fw_info *fw = &sc->fw; 7872 int error; 7873 7874 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7875 7876 IWN_UNLOCK(sc); 7877 7878 memset(fw, 0, sizeof (*fw)); 7879 7880 /* Read firmware image from filesystem. */ 7881 sc->fw_fp = firmware_get(sc->fwname); 7882 if (sc->fw_fp == NULL) { 7883 device_printf(sc->sc_dev, "%s: could not read firmware %s\n", 7884 __func__, sc->fwname); 7885 IWN_LOCK(sc); 7886 return EINVAL; 7887 } 7888 IWN_LOCK(sc); 7889 7890 fw->size = sc->fw_fp->datasize; 7891 fw->data = (const uint8_t *)sc->fw_fp->data; 7892 if (fw->size < sizeof (uint32_t)) { 7893 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7894 __func__, fw->size); 7895 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7896 sc->fw_fp = NULL; 7897 return EINVAL; 7898 } 7899 7900 /* Retrieve text and data sections. */ 7901 if (*(const uint32_t *)fw->data != 0) /* Legacy image. 
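 * (A legacy image starts with a non-zero version/API word, whereas a
 * TLV image starts with a zero word followed by the IWN_FW_SIGNATURE
 * header checked in iwn_read_firmware_tlv(), so testing the first 32
 * bits is enough to tell them apart.)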
*/ 7902 error = iwn_read_firmware_leg(sc, fw); 7903 else 7904 error = iwn_read_firmware_tlv(sc, fw, 1); 7905 if (error != 0) { 7906 device_printf(sc->sc_dev, 7907 "%s: could not read firmware sections, error %d\n", 7908 __func__, error); 7909 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7910 sc->fw_fp = NULL; 7911 return error; 7912 } 7913 7914 /* Make sure text and data sections fit in hardware memory. */ 7915 if (fw->main.textsz > sc->fw_text_maxsz || 7916 fw->main.datasz > sc->fw_data_maxsz || 7917 fw->init.textsz > sc->fw_text_maxsz || 7918 fw->init.datasz > sc->fw_data_maxsz || 7919 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 7920 (fw->boot.textsz & 3) != 0) { 7921 device_printf(sc->sc_dev, "%s: firmware sections too large\n", 7922 __func__); 7923 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7924 sc->fw_fp = NULL; 7925 return EINVAL; 7926 } 7927 7928 /* We can proceed with loading the firmware. */ 7929 return 0; 7930 } 7931 7932 static int 7933 iwn_clock_wait(struct iwn_softc *sc) 7934 { 7935 int ntries; 7936 7937 /* Set "initialization complete" bit. */ 7938 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 7939 7940 /* Wait for clock stabilization. */ 7941 for (ntries = 0; ntries < 2500; ntries++) { 7942 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 7943 return 0; 7944 DELAY(10); 7945 } 7946 device_printf(sc->sc_dev, 7947 "%s: timeout waiting for clock stabilization\n", __func__); 7948 return ETIMEDOUT; 7949 } 7950 7951 static int 7952 iwn_apm_init(struct iwn_softc *sc) 7953 { 7954 uint32_t reg; 7955 int error; 7956 7957 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7958 7959 /* Disable L0s exit timer (NMI bug workaround). */ 7960 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 7961 /* Don't wait for ICH L0s (ICH bug workaround). */ 7962 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 7963 7964 /* Set FH wait threshold to max (HW bug under stress workaround). */ 7965 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 7966 7967 /* Enable HAP INTA to move adapter from L1a to L0s. */ 7968 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 7969 7970 /* Retrieve PCIe Active State Power Management (ASPM). */ 7971 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 7972 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 7973 if (reg & 0x02) /* L1 Entry enabled. */ 7974 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 7975 else 7976 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 7977 7978 if (sc->base_params->pll_cfg_val) 7979 IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val); 7980 7981 /* Wait for clock stabilization before accessing prph. */ 7982 if ((error = iwn_clock_wait(sc)) != 0) 7983 return error; 7984 7985 if ((error = iwn_nic_lock(sc)) != 0) 7986 return error; 7987 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 7988 /* Enable DMA and BSM (Bootstrap State Machine). */ 7989 iwn_prph_write(sc, IWN_APMG_CLK_EN, 7990 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 7991 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 7992 } else { 7993 /* Enable DMA. */ 7994 iwn_prph_write(sc, IWN_APMG_CLK_EN, 7995 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 7996 } 7997 DELAY(20); 7998 /* Disable L1-Active. */ 7999 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 8000 iwn_nic_unlock(sc); 8001 8002 return 0; 8003 } 8004 8005 static void 8006 iwn_apm_stop_master(struct iwn_softc *sc) 8007 { 8008 int ntries; 8009 8010 /* Stop busmaster DMA activity. 
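 * The loop below gives the bus master roughly 1ms (100 polls of 10us)
 * to report IWN_RESET_MASTER_DISABLED before we print a diagnostic
 * and carry on regardless.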
*/ 8011 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 8012 for (ntries = 0; ntries < 100; ntries++) { 8013 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 8014 return; 8015 DELAY(10); 8016 } 8017 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); 8018 } 8019 8020 static void 8021 iwn_apm_stop(struct iwn_softc *sc) 8022 { 8023 iwn_apm_stop_master(sc); 8024 8025 /* Reset the entire device. */ 8026 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 8027 DELAY(10); 8028 /* Clear "initialization complete" bit. */ 8029 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 8030 } 8031 8032 static int 8033 iwn4965_nic_config(struct iwn_softc *sc) 8034 { 8035 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8036 8037 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 8038 /* 8039 * I don't believe this to be correct but this is what the 8040 * vendor driver is doing. Probably the bits should not be 8041 * shifted in IWN_RFCFG_*. 8042 */ 8043 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8044 IWN_RFCFG_TYPE(sc->rfcfg) | 8045 IWN_RFCFG_STEP(sc->rfcfg) | 8046 IWN_RFCFG_DASH(sc->rfcfg)); 8047 } 8048 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8049 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 8050 return 0; 8051 } 8052 8053 static int 8054 iwn5000_nic_config(struct iwn_softc *sc) 8055 { 8056 uint32_t tmp; 8057 int error; 8058 8059 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8060 8061 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 8062 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8063 IWN_RFCFG_TYPE(sc->rfcfg) | 8064 IWN_RFCFG_STEP(sc->rfcfg) | 8065 IWN_RFCFG_DASH(sc->rfcfg)); 8066 } 8067 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8068 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 8069 8070 if ((error = iwn_nic_lock(sc)) != 0) 8071 return error; 8072 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 8073 8074 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 8075 /* 8076 * Select first Switching Voltage Regulator (1.32V) to 8077 * solve a stability issue related to noisy DC2DC line 8078 * in the silicon of 1000 Series. 8079 */ 8080 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 8081 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 8082 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 8083 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 8084 } 8085 iwn_nic_unlock(sc); 8086 8087 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 8088 /* Use internal power amplifier only. */ 8089 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 8090 } 8091 if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) { 8092 /* Indicate that ROM calibration version is >=6. */ 8093 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 8094 } 8095 if (sc->base_params->additional_gp_drv_bit) 8096 IWN_SETBITS(sc, IWN_GP_DRIVER, 8097 sc->base_params->additional_gp_drv_bit); 8098 return 0; 8099 } 8100 8101 /* 8102 * Take NIC ownership over Intel Active Management Technology (AMT). 8103 */ 8104 static int 8105 iwn_hw_prepare(struct iwn_softc *sc) 8106 { 8107 int ntries; 8108 8109 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8110 8111 /* Check if hardware is ready. */ 8112 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 8113 for (ntries = 0; ntries < 5; ntries++) { 8114 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 8115 IWN_HW_IF_CONFIG_NIC_READY) 8116 return 0; 8117 DELAY(10); 8118 } 8119 8120 /* Hardware not ready, force into ready state. 
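 * Setting IWN_HW_IF_CONFIG_PREPARE asks the management engine (AMT)
 * to release the NIC; we then wait for PREPARE_DONE to clear, up to
 * roughly 150ms (15000 polls of 10us), before redoing the NIC_READY
 * handshake below.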
*/ 8121 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 8122 for (ntries = 0; ntries < 15000; ntries++) { 8123 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 8124 IWN_HW_IF_CONFIG_PREPARE_DONE)) 8125 break; 8126 DELAY(10); 8127 } 8128 if (ntries == 15000) 8129 return ETIMEDOUT; 8130 8131 /* Hardware should be ready now. */ 8132 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 8133 for (ntries = 0; ntries < 5; ntries++) { 8134 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 8135 IWN_HW_IF_CONFIG_NIC_READY) 8136 return 0; 8137 DELAY(10); 8138 } 8139 return ETIMEDOUT; 8140 } 8141 8142 static int 8143 iwn_hw_init(struct iwn_softc *sc) 8144 { 8145 struct iwn_ops *ops = &sc->ops; 8146 int error, chnl, qid; 8147 8148 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8149 8150 /* Clear pending interrupts. */ 8151 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8152 8153 if ((error = iwn_apm_init(sc)) != 0) { 8154 device_printf(sc->sc_dev, 8155 "%s: could not power ON adapter, error %d\n", __func__, 8156 error); 8157 return error; 8158 } 8159 8160 /* Select VMAIN power source. */ 8161 if ((error = iwn_nic_lock(sc)) != 0) 8162 return error; 8163 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 8164 iwn_nic_unlock(sc); 8165 8166 /* Perform adapter-specific initialization. */ 8167 if ((error = ops->nic_config(sc)) != 0) 8168 return error; 8169 8170 /* Initialize RX ring. */ 8171 if ((error = iwn_nic_lock(sc)) != 0) 8172 return error; 8173 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 8174 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 8175 /* Set physical address of RX ring (256-byte aligned). */ 8176 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 8177 /* Set physical address of RX status (16-byte aligned). */ 8178 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 8179 /* Enable RX. */ 8180 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 8181 IWN_FH_RX_CONFIG_ENA | 8182 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 8183 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 8184 IWN_FH_RX_CONFIG_SINGLE_FRAME | 8185 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 8186 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 8187 iwn_nic_unlock(sc); 8188 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 8189 8190 if ((error = iwn_nic_lock(sc)) != 0) 8191 return error; 8192 8193 /* Initialize TX scheduler. */ 8194 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 8195 8196 /* Set physical address of "keep warm" page (16-byte aligned). */ 8197 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 8198 8199 /* Initialize TX rings. */ 8200 for (qid = 0; qid < sc->ntxqs; qid++) { 8201 struct iwn_tx_ring *txq = &sc->txq[qid]; 8202 8203 /* Set physical address of TX ring (256-byte aligned). */ 8204 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 8205 txq->desc_dma.paddr >> 8); 8206 } 8207 iwn_nic_unlock(sc); 8208 8209 /* Enable DMA channels. */ 8210 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 8211 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 8212 IWN_FH_TX_CONFIG_DMA_ENA | 8213 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 8214 } 8215 8216 /* Clear "radio off" and "commands blocked" bits. */ 8217 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8218 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 8219 8220 /* Clear pending interrupts. */ 8221 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8222 /* Enable interrupt coalescing. */ 8223 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 8224 /* Enable interrupts. */ 8225 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8226 8227 /* _Really_ make sure "radio off" bit is cleared! 
*/ 8228 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8229 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8230 8231 /* Enable shadow registers. */ 8232 if (sc->base_params->shadow_reg_enable) 8233 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 8234 8235 if ((error = ops->load_firmware(sc)) != 0) { 8236 device_printf(sc->sc_dev, 8237 "%s: could not load firmware, error %d\n", __func__, 8238 error); 8239 return error; 8240 } 8241 /* Wait at most one second for firmware alive notification. */ 8242 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { 8243 device_printf(sc->sc_dev, 8244 "%s: timeout waiting for adapter to initialize, error %d\n", 8245 __func__, error); 8246 return error; 8247 } 8248 /* Do post-firmware initialization. */ 8249 8250 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8251 8252 return ops->post_alive(sc); 8253 } 8254 8255 static void 8256 iwn_hw_stop(struct iwn_softc *sc) 8257 { 8258 int chnl, qid, ntries; 8259 8260 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8261 8262 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 8263 8264 /* Disable interrupts. */ 8265 IWN_WRITE(sc, IWN_INT_MASK, 0); 8266 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8267 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 8268 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8269 8270 /* Make sure we no longer hold the NIC lock. */ 8271 iwn_nic_unlock(sc); 8272 8273 /* Stop TX scheduler. */ 8274 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 8275 8276 /* Stop all DMA channels. */ 8277 if (iwn_nic_lock(sc) == 0) { 8278 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 8279 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 8280 for (ntries = 0; ntries < 200; ntries++) { 8281 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 8282 IWN_FH_TX_STATUS_IDLE(chnl)) 8283 break; 8284 DELAY(10); 8285 } 8286 } 8287 iwn_nic_unlock(sc); 8288 } 8289 8290 /* Stop RX ring. */ 8291 iwn_reset_rx_ring(sc, &sc->rxq); 8292 8293 /* Reset all TX rings. */ 8294 for (qid = 0; qid < sc->ntxqs; qid++) 8295 iwn_reset_tx_ring(sc, &sc->txq[qid]); 8296 8297 if (iwn_nic_lock(sc) == 0) { 8298 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 8299 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 8300 iwn_nic_unlock(sc); 8301 } 8302 DELAY(5); 8303 /* Power OFF adapter. */ 8304 iwn_apm_stop(sc); 8305 } 8306 8307 static void 8308 iwn_radio_on(void *arg0, int pending) 8309 { 8310 struct iwn_softc *sc = arg0; 8311 struct ifnet *ifp = sc->sc_ifp; 8312 struct ieee80211com *ic = ifp->if_l2com; 8313 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8314 8315 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8316 8317 if (vap != NULL) { 8318 iwn_init(sc); 8319 ieee80211_init(vap); 8320 } 8321 } 8322 8323 static void 8324 iwn_radio_off(void *arg0, int pending) 8325 { 8326 struct iwn_softc *sc = arg0; 8327 struct ifnet *ifp = sc->sc_ifp; 8328 struct ieee80211com *ic = ifp->if_l2com; 8329 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8330 8331 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8332 8333 iwn_stop(sc); 8334 if (vap != NULL) 8335 ieee80211_stop(vap); 8336 8337 /* Enable interrupts to get RF toggle notification. 
*/ 8338 IWN_LOCK(sc); 8339 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8340 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8341 IWN_UNLOCK(sc); 8342 } 8343 8344 static void 8345 iwn_init_locked(struct iwn_softc *sc) 8346 { 8347 struct ifnet *ifp = sc->sc_ifp; 8348 int error; 8349 8350 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8351 8352 IWN_LOCK_ASSERT(sc); 8353 8354 if ((error = iwn_hw_prepare(sc)) != 0) { 8355 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 8356 __func__, error); 8357 goto fail; 8358 } 8359 8360 /* Initialize interrupt mask to default value. */ 8361 sc->int_mask = IWN_INT_MASK_DEF; 8362 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8363 8364 /* Check that the radio is not disabled by hardware switch. */ 8365 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 8366 device_printf(sc->sc_dev, 8367 "radio is disabled by hardware switch\n"); 8368 /* Enable interrupts to get RF toggle notifications. */ 8369 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8370 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8371 return; 8372 } 8373 8374 /* Read firmware images from the filesystem. */ 8375 if ((error = iwn_read_firmware(sc)) != 0) { 8376 device_printf(sc->sc_dev, 8377 "%s: could not read firmware, error %d\n", __func__, 8378 error); 8379 goto fail; 8380 } 8381 8382 /* Initialize hardware and upload firmware. */ 8383 error = iwn_hw_init(sc); 8384 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8385 sc->fw_fp = NULL; 8386 if (error != 0) { 8387 device_printf(sc->sc_dev, 8388 "%s: could not initialize hardware, error %d\n", __func__, 8389 error); 8390 goto fail; 8391 } 8392 8393 /* Configure adapter now that it is ready. */ 8394 if ((error = iwn_config(sc)) != 0) { 8395 device_printf(sc->sc_dev, 8396 "%s: could not configure device, error %d\n", __func__, 8397 error); 8398 goto fail; 8399 } 8400 8401 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 8402 ifp->if_drv_flags |= IFF_DRV_RUNNING; 8403 8404 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 8405 8406 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8407 8408 return; 8409 8410 fail: iwn_stop_locked(sc); 8411 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 8412 } 8413 8414 static void 8415 iwn_init(void *arg) 8416 { 8417 struct iwn_softc *sc = arg; 8418 struct ifnet *ifp = sc->sc_ifp; 8419 struct ieee80211com *ic = ifp->if_l2com; 8420 8421 IWN_LOCK(sc); 8422 iwn_init_locked(sc); 8423 IWN_UNLOCK(sc); 8424 8425 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 8426 ieee80211_start_all(ic); 8427 } 8428 8429 static void 8430 iwn_stop_locked(struct iwn_softc *sc) 8431 { 8432 struct ifnet *ifp = sc->sc_ifp; 8433 8434 IWN_LOCK_ASSERT(sc); 8435 8436 sc->sc_is_scanning = 0; 8437 sc->sc_tx_timer = 0; 8438 callout_stop(&sc->watchdog_to); 8439 callout_stop(&sc->calib_to); 8440 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 8441 8442 /* Power OFF hardware. */ 8443 iwn_hw_stop(sc); 8444 } 8445 8446 static void 8447 iwn_stop(struct iwn_softc *sc) 8448 { 8449 IWN_LOCK(sc); 8450 iwn_stop_locked(sc); 8451 IWN_UNLOCK(sc); 8452 } 8453 8454 /* 8455 * Callback from net80211 to start a scan. 8456 */ 8457 static void 8458 iwn_scan_start(struct ieee80211com *ic) 8459 { 8460 struct ifnet *ifp = ic->ic_ifp; 8461 struct iwn_softc *sc = ifp->if_softc; 8462 8463 IWN_LOCK(sc); 8464 /* make the link LED blink while we're scanning */ 8465 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 8466 IWN_UNLOCK(sc); 8467 } 8468 8469 /* 8470 * Callback from net80211 to terminate a scan. 
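 * Puts the link LED back to steady on when the vap is associated,
 * undoing the blink pattern set up in iwn_scan_start().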
8471 */ 8472 static void 8473 iwn_scan_end(struct ieee80211com *ic) 8474 { 8475 struct ifnet *ifp = ic->ic_ifp; 8476 struct iwn_softc *sc = ifp->if_softc; 8477 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8478 8479 IWN_LOCK(sc); 8480 if (vap->iv_state == IEEE80211_S_RUN) { 8481 /* Set link LED to ON status if we are associated */ 8482 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 8483 } 8484 IWN_UNLOCK(sc); 8485 } 8486 8487 /* 8488 * Callback from net80211 to force a channel change. 8489 */ 8490 static void 8491 iwn_set_channel(struct ieee80211com *ic) 8492 { 8493 const struct ieee80211_channel *c = ic->ic_curchan; 8494 struct ifnet *ifp = ic->ic_ifp; 8495 struct iwn_softc *sc = ifp->if_softc; 8496 int error; 8497 8498 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8499 8500 IWN_LOCK(sc); 8501 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 8502 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 8503 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 8504 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 8505 8506 /* 8507 * Only need to set the channel in Monitor mode. AP scanning and auth 8508 * are already taken care of by their respective firmware commands. 8509 */ 8510 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 8511 error = iwn_config(sc); 8512 if (error != 0) 8513 device_printf(sc->sc_dev, 8514 "%s: error %d setting channel\n", __func__, error); 8515 } 8516 IWN_UNLOCK(sc); 8517 } 8518 8519 /* 8520 * Callback from net80211 to start scanning of the current channel. 8521 */ 8522 static void 8523 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 8524 { 8525 struct ieee80211vap *vap = ss->ss_vap; 8526 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 8527 struct ieee80211com *ic = vap->iv_ic; 8528 int error; 8529 8530 IWN_LOCK(sc); 8531 error = iwn_scan(sc, vap, ss, ic->ic_curchan); 8532 IWN_UNLOCK(sc); 8533 if (error != 0) 8534 ieee80211_cancel_scan(vap); 8535 } 8536 8537 /* 8538 * Callback from net80211 to handle the minimum dwell time being met. 8539 * The intent is to terminate the scan but we just let the firmware 8540 * notify us when it's finished as we have no safe way to abort it. 
8541 */ 8542 static void 8543 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 8544 { 8545 /* NB: don't try to abort scan; wait for firmware to finish */ 8546 } 8547 8548 static void 8549 iwn_hw_reset(void *arg0, int pending) 8550 { 8551 struct iwn_softc *sc = arg0; 8552 struct ifnet *ifp = sc->sc_ifp; 8553 struct ieee80211com *ic = ifp->if_l2com; 8554 8555 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8556 8557 iwn_stop(sc); 8558 iwn_init(sc); 8559 ieee80211_notify_radio(ic, 1); 8560 } 8561 #ifdef IWN_DEBUG 8562 #define IWN_DESC(x) case x: return #x 8563 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 8564 8565 /* 8566 * Translate CSR code to string 8567 */ 8568 static char *iwn_get_csr_string(int csr) 8569 { 8570 switch (csr) { 8571 IWN_DESC(IWN_HW_IF_CONFIG); 8572 IWN_DESC(IWN_INT_COALESCING); 8573 IWN_DESC(IWN_INT); 8574 IWN_DESC(IWN_INT_MASK); 8575 IWN_DESC(IWN_FH_INT); 8576 IWN_DESC(IWN_GPIO_IN); 8577 IWN_DESC(IWN_RESET); 8578 IWN_DESC(IWN_GP_CNTRL); 8579 IWN_DESC(IWN_HW_REV); 8580 IWN_DESC(IWN_EEPROM); 8581 IWN_DESC(IWN_EEPROM_GP); 8582 IWN_DESC(IWN_OTP_GP); 8583 IWN_DESC(IWN_GIO); 8584 IWN_DESC(IWN_GP_UCODE); 8585 IWN_DESC(IWN_GP_DRIVER); 8586 IWN_DESC(IWN_UCODE_GP1); 8587 IWN_DESC(IWN_UCODE_GP2); 8588 IWN_DESC(IWN_LED); 8589 IWN_DESC(IWN_DRAM_INT_TBL); 8590 IWN_DESC(IWN_GIO_CHICKEN); 8591 IWN_DESC(IWN_ANA_PLL); 8592 IWN_DESC(IWN_HW_REV_WA); 8593 IWN_DESC(IWN_DBG_HPET_MEM); 8594 default: 8595 return "UNKNOWN CSR"; 8596 } 8597 } 8598 8599 /* 8600 * This function print firmware register 8601 */ 8602 static void 8603 iwn_debug_register(struct iwn_softc *sc) 8604 { 8605 int i; 8606 static const uint32_t csr_tbl[] = { 8607 IWN_HW_IF_CONFIG, 8608 IWN_INT_COALESCING, 8609 IWN_INT, 8610 IWN_INT_MASK, 8611 IWN_FH_INT, 8612 IWN_GPIO_IN, 8613 IWN_RESET, 8614 IWN_GP_CNTRL, 8615 IWN_HW_REV, 8616 IWN_EEPROM, 8617 IWN_EEPROM_GP, 8618 IWN_OTP_GP, 8619 IWN_GIO, 8620 IWN_GP_UCODE, 8621 IWN_GP_DRIVER, 8622 IWN_UCODE_GP1, 8623 IWN_UCODE_GP2, 8624 IWN_LED, 8625 IWN_DRAM_INT_TBL, 8626 IWN_GIO_CHICKEN, 8627 IWN_ANA_PLL, 8628 IWN_HW_REV_WA, 8629 IWN_DBG_HPET_MEM, 8630 }; 8631 DPRINTF(sc, IWN_DEBUG_REGISTER, 8632 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 8633 "\n"); 8634 for (i = 0; i < COUNTOF(csr_tbl); i++){ 8635 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 8636 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 8637 if ((i+1) % 3 == 0) 8638 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8639 } 8640 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8641 } 8642 #endif 8643