1 /*- 2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr> 3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org> 4 * Copyright (c) 2008 Sam Leffler, Errno Consulting 5 * Copyright (c) 2011 Intel Corporation 6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr> 7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org> 8 * 9 * Permission to use, copy, modify, and distribute this software for any 10 * purpose with or without fee is hereby granted, provided that the above 11 * copyright notice and this permission notice appear in all copies. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 24 * adapters. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_wlan.h" 31 #include "opt_iwn.h" 32 33 #include <sys/param.h> 34 #include <sys/sockio.h> 35 #include <sys/sysctl.h> 36 #include <sys/mbuf.h> 37 #include <sys/kernel.h> 38 #include <sys/socket.h> 39 #include <sys/systm.h> 40 #include <sys/malloc.h> 41 #include <sys/bus.h> 42 #include <sys/rman.h> 43 #include <sys/endian.h> 44 #include <sys/firmware.h> 45 #include <sys/limits.h> 46 #include <sys/module.h> 47 #include <sys/queue.h> 48 #include <sys/taskqueue.h> 49 50 #include <machine/bus.h> 51 #include <machine/resource.h> 52 #include <machine/clock.h> 53 54 #include <dev/pci/pcireg.h> 55 #include <dev/pci/pcivar.h> 56 57 #include <net/bpf.h> 58 #include <net/if.h> 59 #include <net/if_var.h> 60 #include <net/if_arp.h> 61 #include <net/ethernet.h> 62 #include <net/if_dl.h> 63 #include <net/if_media.h> 64 #include <net/if_types.h> 65 66 #include <netinet/in.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/in_var.h> 69 #include <netinet/if_ether.h> 70 #include <netinet/ip.h> 71 72 #include <net80211/ieee80211_var.h> 73 #include <net80211/ieee80211_radiotap.h> 74 #include <net80211/ieee80211_regdomain.h> 75 #include <net80211/ieee80211_ratectl.h> 76 77 #include <dev/iwn/if_iwnreg.h> 78 #include <dev/iwn/if_iwnvar.h> 79 #include <dev/iwn/if_iwn_devid.h> 80 #include <dev/iwn/if_iwn_chip_cfg.h> 81 #include <dev/iwn/if_iwn_debug.h> 82 83 struct iwn_ident { 84 uint16_t vendor; 85 uint16_t device; 86 const char *name; 87 }; 88 89 static const struct iwn_ident iwn_ident_table[] = { 90 { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" }, 91 { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" }, 92 { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" }, 93 { 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" }, 94 { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" }, 95 { 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" }, 96 { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" }, 97 { 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" }, 98 { 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" }, 99 { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" }, 100 { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" }, 101 { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" }, 102 { 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 103 { 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 104 /* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */ 105 { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" }, 106 { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" }, 107 { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" }, 108 { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" }, 109 { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" }, 110 { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" }, 111 { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" }, 112 { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" }, 113 { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" }, 114 { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" }, 115 { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" }, 116 { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" }, 117 { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" }, 118 { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" }, 119 { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" }, 120 { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" }, 121 { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" }, 122 { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" }, 123 { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" }, 124 { 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" }, 125 { 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" }, 126 { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" }, 127 { 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" }, 128 { 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" }, 129 { 0, 0, NULL } 130 }; 131 132 static int iwn_probe(device_t); 133 static int iwn_attach(device_t); 134 static int iwn4965_attach(struct iwn_softc *, uint16_t); 135 static int iwn5000_attach(struct iwn_softc *, uint16_t); 136 static int iwn_config_specific(struct iwn_softc *, uint16_t); 137 static void iwn_radiotap_attach(struct iwn_softc *); 138 static void iwn_sysctlattach(struct iwn_softc *); 139 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 140 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 141 const uint8_t [IEEE80211_ADDR_LEN], 142 const uint8_t [IEEE80211_ADDR_LEN]); 143 static void iwn_vap_delete(struct ieee80211vap *); 144 static int iwn_detach(device_t); 145 static int iwn_shutdown(device_t); 146 static int iwn_suspend(device_t); 147 static int iwn_resume(device_t); 148 static int iwn_nic_lock(struct iwn_softc *); 149 static int iwn_eeprom_lock(struct iwn_softc *); 150 static int iwn_init_otprom(struct iwn_softc *); 151 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 152 static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 153 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 154 void **, bus_size_t, bus_size_t); 155 static void iwn_dma_contig_free(struct iwn_dma_info *); 156 static int iwn_alloc_sched(struct iwn_softc *); 157 static void iwn_free_sched(struct iwn_softc *); 158 static int iwn_alloc_kw(struct iwn_softc *); 159 static void iwn_free_kw(struct iwn_softc *); 160 static int iwn_alloc_ict(struct iwn_softc *); 161 static void iwn_free_ict(struct iwn_softc *); 162 static int iwn_alloc_fwmem(struct iwn_softc *); 163 static void iwn_free_fwmem(struct iwn_softc *); 164 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 165 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 166 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 167 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 168 int); 169 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 170 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 171 static void iwn5000_ict_reset(struct iwn_softc *); 172 static int iwn_read_eeprom(struct iwn_softc *, 173 uint8_t macaddr[IEEE80211_ADDR_LEN]); 174 static void iwn4965_read_eeprom(struct iwn_softc *); 175 #ifdef IWN_DEBUG 176 static void iwn4965_print_power_group(struct iwn_softc *, int); 177 #endif 178 static void iwn5000_read_eeprom(struct iwn_softc *); 179 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 180 static void iwn_read_eeprom_band(struct iwn_softc *, int); 181 static void iwn_read_eeprom_ht40(struct iwn_softc *, int); 182 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 183 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 184 struct ieee80211_channel *); 185 static int iwn_setregdomain(struct ieee80211com *, 186 struct ieee80211_regdomain *, int, 187 struct ieee80211_channel[]); 188 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 189 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 190 const uint8_t mac[IEEE80211_ADDR_LEN]); 191 static void iwn_newassoc(struct ieee80211_node *, int); 192 static int iwn_media_change(struct ifnet *); 193 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 194 static void iwn_calib_timeout(void *); 195 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 196 struct iwn_rx_data *); 197 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 198 struct iwn_rx_data *); 199 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 200 struct iwn_rx_data *); 201 static void iwn5000_rx_calib_results(struct iwn_softc *, 202 struct iwn_rx_desc *, struct iwn_rx_data *); 203 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 204 struct iwn_rx_data *); 205 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 206 struct iwn_rx_data *); 207 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 208 struct iwn_rx_data *); 209 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 210 uint8_t); 211 static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *); 212 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 213 static void iwn_notif_intr(struct iwn_softc *); 214 static void iwn_wakeup_intr(struct iwn_softc *); 215 static void iwn_rftoggle_intr(struct iwn_softc *); 216 static void iwn_fatal_intr(struct iwn_softc *); 217 static void iwn_intr(void *); 218 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 219 uint16_t); 220 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 221 uint16_t); 222 #ifdef notyet 223 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 224 #endif 225 static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 226 struct ieee80211_node *); 227 static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, 228 struct ieee80211_node *, 229 const struct ieee80211_bpf_params *params); 230 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 231 const struct ieee80211_bpf_params *); 232 static void iwn_start(struct ifnet *); 233 static void iwn_start_locked(struct ifnet *); 234 static void iwn_watchdog(void *); 235 static int iwn_ioctl(struct ifnet *, u_long, caddr_t); 236 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 237 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 238 int); 239 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 240 int); 241 static int iwn_set_link_quality(struct iwn_softc *, 242 struct ieee80211_node *); 243 static int iwn_add_broadcast_node(struct iwn_softc *, int); 244 static int iwn_updateedca(struct ieee80211com *); 245 static void iwn_update_mcast(struct ifnet *); 246 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 247 static int iwn_set_critical_temp(struct iwn_softc *); 248 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 249 static void iwn4965_power_calibration(struct iwn_softc *, int); 250 static int iwn4965_set_txpower(struct iwn_softc *, 251 struct ieee80211_channel *, int); 252 static int iwn5000_set_txpower(struct iwn_softc *, 253 struct ieee80211_channel *, int); 254 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 255 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 256 static int iwn_get_noise(const struct iwn_rx_general_stats *); 257 static int iwn4965_get_temperature(struct iwn_softc *); 258 static int iwn5000_get_temperature(struct iwn_softc *); 259 static int iwn_init_sensitivity(struct iwn_softc *); 260 static void iwn_collect_noise(struct iwn_softc *, 261 const struct iwn_rx_general_stats *); 262 static int iwn4965_init_gains(struct iwn_softc *); 263 static int iwn5000_init_gains(struct iwn_softc *); 264 static int iwn4965_set_gains(struct iwn_softc *); 265 static int iwn5000_set_gains(struct iwn_softc *); 266 static void iwn_tune_sensitivity(struct iwn_softc *, 267 const struct iwn_rx_stats *); 268 static void iwn_save_stats_counters(struct iwn_softc *, 269 const struct iwn_stats *); 270 static int iwn_send_sensitivity(struct iwn_softc *); 271 static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *); 272 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 273 static int iwn_send_btcoex(struct iwn_softc *); 274 static int iwn_send_advanced_btcoex(struct iwn_softc *); 275 static int iwn5000_runtime_calib(struct iwn_softc *); 276 static int iwn_config(struct iwn_softc *); 277 static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int); 278 static int iwn_scan(struct iwn_softc *, struct ieee80211vap *, 279 struct ieee80211_scan_state *, struct ieee80211_channel *); 280 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 281 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 282 static int iwn_ampdu_rx_start(struct ieee80211_node *, 283 struct ieee80211_rx_ampdu *, int, int, int); 284 static void iwn_ampdu_rx_stop(struct ieee80211_node *, 285 struct ieee80211_rx_ampdu *); 286 static int iwn_addba_request(struct ieee80211_node *, 287 struct ieee80211_tx_ampdu *, int, int, int); 288 static int iwn_addba_response(struct ieee80211_node *, 289 struct ieee80211_tx_ampdu *, int, int, int); 290 static int iwn_ampdu_tx_start(struct ieee80211com *, 291 struct ieee80211_node *, uint8_t); 292 static void iwn_ampdu_tx_stop(struct ieee80211_node *, 293 struct ieee80211_tx_ampdu *); 294 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 295 struct ieee80211_node *, int, uint8_t, uint16_t); 296 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, 297 uint8_t, uint16_t); 298 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 299 struct ieee80211_node *, int, uint8_t, uint16_t); 300 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, 301 uint8_t, uint16_t); 302 static int iwn5000_query_calibration(struct iwn_softc *); 303 static int iwn5000_send_calibration(struct iwn_softc *); 304 static int iwn5000_send_wimax_coex(struct iwn_softc *); 305 static int iwn5000_crystal_calib(struct iwn_softc *); 306 static int iwn5000_temp_offset_calib(struct iwn_softc *); 307 static int iwn5000_temp_offset_calibv2(struct iwn_softc *); 308 static int iwn4965_post_alive(struct iwn_softc *); 309 static int iwn5000_post_alive(struct iwn_softc *); 310 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 311 int); 312 static int iwn4965_load_firmware(struct iwn_softc *); 313 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 314 const uint8_t *, int); 315 static int iwn5000_load_firmware(struct iwn_softc *); 316 static int iwn_read_firmware_leg(struct iwn_softc *, 317 struct iwn_fw_info *); 318 static int iwn_read_firmware_tlv(struct iwn_softc *, 319 struct iwn_fw_info *, uint16_t); 320 static int iwn_read_firmware(struct iwn_softc *); 321 static int iwn_clock_wait(struct iwn_softc *); 322 static int iwn_apm_init(struct iwn_softc *); 323 static void iwn_apm_stop_master(struct iwn_softc *); 324 static void iwn_apm_stop(struct iwn_softc *); 325 static int iwn4965_nic_config(struct iwn_softc *); 326 static int iwn5000_nic_config(struct iwn_softc *); 327 static int iwn_hw_prepare(struct iwn_softc *); 328 static int iwn_hw_init(struct iwn_softc *); 329 static void iwn_hw_stop(struct iwn_softc *); 330 static void iwn_radio_on(void *, int); 331 static void iwn_radio_off(void *, int); 332 static void iwn_init_locked(struct iwn_softc *); 333 static void iwn_init(void *); 334 static void iwn_stop_locked(struct iwn_softc *); 335 static void iwn_stop(struct iwn_softc *); 336 static void iwn_scan_start(struct ieee80211com *); 337 static void iwn_scan_end(struct ieee80211com *); 338 static void iwn_set_channel(struct ieee80211com *); 339 static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 340 static void iwn_scan_mindwell(struct ieee80211_scan_state *); 341 static void iwn_hw_reset(void *, int); 342 #ifdef IWN_DEBUG 343 static char *iwn_get_csr_string(int); 344 static void iwn_debug_register(struct iwn_softc *); 345 #endif 346 347 static device_method_t iwn_methods[] = { 348 /* Device interface */ 349 DEVMETHOD(device_probe, iwn_probe), 350 DEVMETHOD(device_attach, iwn_attach), 351 DEVMETHOD(device_detach, iwn_detach), 352 DEVMETHOD(device_shutdown, iwn_shutdown), 353 DEVMETHOD(device_suspend, iwn_suspend), 354 DEVMETHOD(device_resume, iwn_resume), 355 356 DEVMETHOD_END 357 }; 358 359 static driver_t iwn_driver = { 360 "iwn", 361 iwn_methods, 362 sizeof(struct iwn_softc) 363 }; 364 static devclass_t iwn_devclass; 365 366 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); 367 368 MODULE_VERSION(iwn, 1); 369 370 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 371 MODULE_DEPEND(iwn, pci, 1, 1, 1); 372 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 373 374 static int 375 iwn_probe(device_t dev) 376 { 377 const struct iwn_ident *ident; 378 379 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 380 if (pci_get_vendor(dev) == ident->vendor && 381 pci_get_device(dev) == ident->device) { 382 device_set_desc(dev, ident->name); 383 return (BUS_PROBE_DEFAULT); 384 } 385 } 386 return ENXIO; 387 } 388 389 static int 390 iwn_attach(device_t dev) 391 { 392 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 393 struct ieee80211com *ic; 394 struct ifnet *ifp; 395 int i, error, rid; 396 uint8_t macaddr[IEEE80211_ADDR_LEN]; 397 398 sc->sc_dev = dev; 399 400 #ifdef IWN_DEBUG 401 error = resource_int_value(device_get_name(sc->sc_dev), 402 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 403 if (error != 0) 404 sc->sc_debug = 0; 405 #else 406 sc->sc_debug = 0; 407 #endif 408 409 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); 410 411 /* 412 * Get the offset of the PCI Express Capability Structure in PCI 413 * Configuration Space. 414 */ 415 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 416 if (error != 0) { 417 device_printf(dev, "PCIe capability structure not found!\n"); 418 return error; 419 } 420 421 /* Clear device-specific "PCI retry timeout" register (41h). */ 422 pci_write_config(dev, 0x41, 0, 1); 423 424 /* Enable bus-mastering. */ 425 pci_enable_busmaster(dev); 426 427 rid = PCIR_BAR(0); 428 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 429 RF_ACTIVE); 430 if (sc->mem == NULL) { 431 device_printf(dev, "can't map mem space\n"); 432 error = ENOMEM; 433 return error; 434 } 435 sc->sc_st = rman_get_bustag(sc->mem); 436 sc->sc_sh = rman_get_bushandle(sc->mem); 437 438 i = 1; 439 rid = 0; 440 if (pci_alloc_msi(dev, &i) == 0) 441 rid = 1; 442 /* Install interrupt handler. */ 443 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 444 (rid != 0 ? 0 : RF_SHAREABLE)); 445 if (sc->irq == NULL) { 446 device_printf(dev, "can't map interrupt\n"); 447 error = ENOMEM; 448 goto fail; 449 } 450 451 IWN_LOCK_INIT(sc); 452 453 /* Read hardware revision and attach. */ 454 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) 455 & IWN_HW_REV_TYPE_MASK; 456 sc->subdevice_id = pci_get_subdevice(dev); 457 458 /* 459 * 4965 versus 5000 and later have different methods. 460 * Let's set those up first. 461 */ 462 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 463 error = iwn4965_attach(sc, pci_get_device(dev)); 464 else 465 error = iwn5000_attach(sc, pci_get_device(dev)); 466 if (error != 0) { 467 device_printf(dev, "could not attach device, error %d\n", 468 error); 469 goto fail; 470 } 471 472 /* 473 * Next, let's setup the various parameters of each NIC. 474 */ 475 error = iwn_config_specific(sc, pci_get_device(dev)); 476 if (error != 0) { 477 device_printf(dev, "could not attach device, error %d\n", 478 error); 479 goto fail; 480 } 481 482 if ((error = iwn_hw_prepare(sc)) != 0) { 483 device_printf(dev, "hardware not ready, error %d\n", error); 484 goto fail; 485 } 486 487 /* Allocate DMA memory for firmware transfers. */ 488 if ((error = iwn_alloc_fwmem(sc)) != 0) { 489 device_printf(dev, 490 "could not allocate memory for firmware, error %d\n", 491 error); 492 goto fail; 493 } 494 495 /* Allocate "Keep Warm" page. */ 496 if ((error = iwn_alloc_kw(sc)) != 0) { 497 device_printf(dev, 498 "could not allocate keep warm page, error %d\n", error); 499 goto fail; 500 } 501 502 /* Allocate ICT table for 5000 Series. */ 503 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 504 (error = iwn_alloc_ict(sc)) != 0) { 505 device_printf(dev, "could not allocate ICT table, error %d\n", 506 error); 507 goto fail; 508 } 509 510 /* Allocate TX scheduler "rings". */ 511 if ((error = iwn_alloc_sched(sc)) != 0) { 512 device_printf(dev, 513 "could not allocate TX scheduler rings, error %d\n", error); 514 goto fail; 515 } 516 517 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 518 for (i = 0; i < sc->ntxqs; i++) { 519 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 520 device_printf(dev, 521 "could not allocate TX ring %d, error %d\n", i, 522 error); 523 goto fail; 524 } 525 } 526 527 /* Allocate RX ring. */ 528 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 529 device_printf(dev, "could not allocate RX ring, error %d\n", 530 error); 531 goto fail; 532 } 533 534 /* Clear pending interrupts. */ 535 IWN_WRITE(sc, IWN_INT, 0xffffffff); 536 537 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 538 if (ifp == NULL) { 539 device_printf(dev, "can not allocate ifnet structure\n"); 540 goto fail; 541 } 542 543 ic = ifp->if_l2com; 544 ic->ic_ifp = ifp; 545 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 546 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 547 548 /* Set device capabilities. */ 549 ic->ic_caps = 550 IEEE80211_C_STA /* station mode supported */ 551 | IEEE80211_C_MONITOR /* monitor mode supported */ 552 | IEEE80211_C_BGSCAN /* background scanning */ 553 | IEEE80211_C_TXPMGT /* tx power management */ 554 | IEEE80211_C_SHSLOT /* short slot time supported */ 555 | IEEE80211_C_WPA 556 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 557 #if 0 558 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 559 #endif 560 | IEEE80211_C_WME /* WME */ 561 | IEEE80211_C_PMGT /* Station-side power mgmt */ 562 ; 563 564 /* Read MAC address, channels, etc from EEPROM. */ 565 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 566 device_printf(dev, "could not read EEPROM, error %d\n", 567 error); 568 goto fail; 569 } 570 571 /* Count the number of available chains. */ 572 sc->ntxchains = 573 ((sc->txchainmask >> 2) & 1) + 574 ((sc->txchainmask >> 1) & 1) + 575 ((sc->txchainmask >> 0) & 1); 576 sc->nrxchains = 577 ((sc->rxchainmask >> 2) & 1) + 578 ((sc->rxchainmask >> 1) & 1) + 579 ((sc->rxchainmask >> 0) & 1); 580 if (bootverbose) { 581 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", 582 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 583 macaddr, ":"); 584 } 585 586 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 587 ic->ic_rxstream = sc->nrxchains; 588 ic->ic_txstream = sc->ntxchains; 589 590 /* 591 * The NICs we currently support cap out at 2x2 support 592 * separate from the chains being used. 593 * 594 * This is a total hack to work around that until some 595 * per-device method is implemented to return the 596 * actual stream support. 597 * 598 * XXX Note: the 5350 is a 3x3 device; so we shouldn't 599 * cap this! But, anything that touches rates in the 600 * driver needs to be audited first before 3x3 is enabled. 601 */ 602 if (ic->ic_rxstream > 2) 603 ic->ic_rxstream = 2; 604 if (ic->ic_txstream > 2) 605 ic->ic_txstream = 2; 606 607 ic->ic_htcaps = 608 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 609 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 610 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 611 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 612 #ifdef notyet 613 | IEEE80211_HTCAP_GREENFIELD 614 #if IWN_RBUF_SIZE == 8192 615 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 616 #else 617 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 618 #endif 619 #endif 620 /* s/w capabilities */ 621 | IEEE80211_HTC_HT /* HT operation */ 622 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 623 #ifdef notyet 624 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 625 #endif 626 ; 627 } 628 629 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 630 ifp->if_softc = sc; 631 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 632 ifp->if_init = iwn_init; 633 ifp->if_ioctl = iwn_ioctl; 634 ifp->if_start = iwn_start; 635 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 636 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 637 IFQ_SET_READY(&ifp->if_snd); 638 639 ieee80211_ifattach(ic, macaddr); 640 ic->ic_vap_create = iwn_vap_create; 641 ic->ic_vap_delete = iwn_vap_delete; 642 ic->ic_raw_xmit = iwn_raw_xmit; 643 ic->ic_node_alloc = iwn_node_alloc; 644 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 645 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 646 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 647 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 648 sc->sc_addba_request = ic->ic_addba_request; 649 ic->ic_addba_request = iwn_addba_request; 650 sc->sc_addba_response = ic->ic_addba_response; 651 ic->ic_addba_response = iwn_addba_response; 652 sc->sc_addba_stop = ic->ic_addba_stop; 653 ic->ic_addba_stop = iwn_ampdu_tx_stop; 654 ic->ic_newassoc = iwn_newassoc; 655 ic->ic_wme.wme_update = iwn_updateedca; 656 ic->ic_update_mcast = iwn_update_mcast; 657 ic->ic_scan_start = iwn_scan_start; 658 ic->ic_scan_end = iwn_scan_end; 659 ic->ic_set_channel = iwn_set_channel; 660 ic->ic_scan_curchan = iwn_scan_curchan; 661 ic->ic_scan_mindwell = iwn_scan_mindwell; 662 ic->ic_setregdomain = iwn_setregdomain; 663 664 iwn_radiotap_attach(sc); 665 666 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); 667 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); 668 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc); 669 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); 670 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); 671 672 iwn_sysctlattach(sc); 673 674 /* 675 * Hook our interrupt after all initialization is complete. 676 */ 677 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 678 NULL, iwn_intr, sc, &sc->sc_ih); 679 if (error != 0) { 680 device_printf(dev, "can't establish interrupt, error %d\n", 681 error); 682 goto fail; 683 } 684 685 if (bootverbose) 686 ieee80211_announce(ic); 687 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 688 return 0; 689 fail: 690 iwn_detach(dev); 691 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 692 return error; 693 } 694 695 /* 696 * Define specific configuration based on device id and subdevice id 697 * pid : PCI device id 698 */ 699 static int 700 iwn_config_specific(struct iwn_softc *sc, uint16_t pid) 701 { 702 703 switch (pid) { 704 /* 4965 series */ 705 case IWN_DID_4965_1: 706 case IWN_DID_4965_2: 707 case IWN_DID_4965_3: 708 case IWN_DID_4965_4: 709 sc->base_params = &iwn4965_base_params; 710 sc->limits = &iwn4965_sensitivity_limits; 711 sc->fwname = "iwn4965fw"; 712 /* Override chains masks, ROM is known to be broken. */ 713 sc->txchainmask = IWN_ANT_AB; 714 sc->rxchainmask = IWN_ANT_ABC; 715 /* Enable normal btcoex */ 716 sc->sc_flags |= IWN_FLAG_BTCOEX; 717 break; 718 /* 1000 Series */ 719 case IWN_DID_1000_1: 720 case IWN_DID_1000_2: 721 switch(sc->subdevice_id) { 722 case IWN_SDID_1000_1: 723 case IWN_SDID_1000_2: 724 case IWN_SDID_1000_3: 725 case IWN_SDID_1000_4: 726 case IWN_SDID_1000_5: 727 case IWN_SDID_1000_6: 728 case IWN_SDID_1000_7: 729 case IWN_SDID_1000_8: 730 case IWN_SDID_1000_9: 731 case IWN_SDID_1000_10: 732 case IWN_SDID_1000_11: 733 case IWN_SDID_1000_12: 734 sc->limits = &iwn1000_sensitivity_limits; 735 sc->base_params = &iwn1000_base_params; 736 sc->fwname = "iwn1000fw"; 737 break; 738 default: 739 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 740 "0x%04x rev %d not supported (subdevice)\n", pid, 741 sc->subdevice_id,sc->hw_type); 742 return ENOTSUP; 743 } 744 break; 745 /* 6x00 Series */ 746 case IWN_DID_6x00_2: 747 case IWN_DID_6x00_4: 748 case IWN_DID_6x00_1: 749 case IWN_DID_6x00_3: 750 sc->fwname = "iwn6000fw"; 751 sc->limits = &iwn6000_sensitivity_limits; 752 switch(sc->subdevice_id) { 753 case IWN_SDID_6x00_1: 754 case IWN_SDID_6x00_2: 755 case IWN_SDID_6x00_8: 756 //iwl6000_3agn_cfg 757 sc->base_params = &iwn_6000_base_params; 758 break; 759 case IWN_SDID_6x00_3: 760 case IWN_SDID_6x00_6: 761 case IWN_SDID_6x00_9: 762 ////iwl6000i_2agn 763 case IWN_SDID_6x00_4: 764 case IWN_SDID_6x00_7: 765 case IWN_SDID_6x00_10: 766 //iwl6000i_2abg_cfg 767 case IWN_SDID_6x00_5: 768 //iwl6000i_2bg_cfg 769 sc->base_params = &iwn_6000i_base_params; 770 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 771 sc->txchainmask = IWN_ANT_BC; 772 sc->rxchainmask = IWN_ANT_BC; 773 break; 774 default: 775 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 776 "0x%04x rev %d not supported (subdevice)\n", pid, 777 sc->subdevice_id,sc->hw_type); 778 return ENOTSUP; 779 } 780 break; 781 /* 6x05 Series */ 782 case IWN_DID_6x05_1: 783 case IWN_DID_6x05_2: 784 switch(sc->subdevice_id) { 785 case IWN_SDID_6x05_1: 786 case IWN_SDID_6x05_4: 787 case IWN_SDID_6x05_6: 788 //iwl6005_2agn_cfg 789 case IWN_SDID_6x05_2: 790 case IWN_SDID_6x05_5: 791 case IWN_SDID_6x05_7: 792 //iwl6005_2abg_cfg 793 case IWN_SDID_6x05_3: 794 //iwl6005_2bg_cfg 795 case IWN_SDID_6x05_8: 796 case IWN_SDID_6x05_9: 797 //iwl6005_2agn_sff_cfg 798 case IWN_SDID_6x05_10: 799 //iwl6005_2agn_d_cfg 800 case IWN_SDID_6x05_11: 801 //iwl6005_2agn_mow1_cfg 802 case IWN_SDID_6x05_12: 803 //iwl6005_2agn_mow2_cfg 804 sc->fwname = "iwn6000g2afw"; 805 sc->limits = &iwn6000_sensitivity_limits; 806 sc->base_params = &iwn_6000g2_base_params; 807 break; 808 default: 809 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 810 "0x%04x rev %d not supported (subdevice)\n", pid, 811 sc->subdevice_id,sc->hw_type); 812 return ENOTSUP; 813 } 814 break; 815 /* 6x35 Series */ 816 case IWN_DID_6035_1: 817 case IWN_DID_6035_2: 818 switch(sc->subdevice_id) { 819 case IWN_SDID_6035_1: 820 case IWN_SDID_6035_2: 821 case IWN_SDID_6035_3: 822 case IWN_SDID_6035_4: 823 sc->fwname = "iwn6000g2bfw"; 824 sc->limits = &iwn6235_sensitivity_limits; 825 sc->base_params = &iwn_6235_base_params; 826 break; 827 default: 828 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 829 "0x%04x rev %d not supported (subdevice)\n", pid, 830 sc->subdevice_id,sc->hw_type); 831 return ENOTSUP; 832 } 833 break; 834 /* 6x50 WiFi/WiMax Series */ 835 case IWN_DID_6050_1: 836 case IWN_DID_6050_2: 837 switch(sc->subdevice_id) { 838 case IWN_SDID_6050_1: 839 case IWN_SDID_6050_3: 840 case IWN_SDID_6050_5: 841 //iwl6050_2agn_cfg 842 case IWN_SDID_6050_2: 843 case IWN_SDID_6050_4: 844 case IWN_SDID_6050_6: 845 //iwl6050_2abg_cfg 846 sc->fwname = "iwn6050fw"; 847 sc->txchainmask = IWN_ANT_AB; 848 sc->rxchainmask = IWN_ANT_AB; 849 sc->limits = &iwn6000_sensitivity_limits; 850 sc->base_params = &iwn_6050_base_params; 851 break; 852 default: 853 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 854 "0x%04x rev %d not supported (subdevice)\n", pid, 855 sc->subdevice_id,sc->hw_type); 856 return ENOTSUP; 857 } 858 break; 859 /* 6150 WiFi/WiMax Series */ 860 case IWN_DID_6150_1: 861 case IWN_DID_6150_2: 862 switch(sc->subdevice_id) { 863 case IWN_SDID_6150_1: 864 case IWN_SDID_6150_3: 865 case IWN_SDID_6150_5: 866 // iwl6150_bgn_cfg 867 case IWN_SDID_6150_2: 868 case IWN_SDID_6150_4: 869 case IWN_SDID_6150_6: 870 //iwl6150_bg_cfg 871 sc->fwname = "iwn6050fw"; 872 sc->limits = &iwn6000_sensitivity_limits; 873 sc->base_params = &iwn_6150_base_params; 874 break; 875 default: 876 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 877 "0x%04x rev %d not supported (subdevice)\n", pid, 878 sc->subdevice_id,sc->hw_type); 879 return ENOTSUP; 880 } 881 break; 882 /* 6030 Series and 1030 Series */ 883 case IWN_DID_x030_1: 884 case IWN_DID_x030_2: 885 case IWN_DID_x030_3: 886 case IWN_DID_x030_4: 887 switch(sc->subdevice_id) { 888 case IWN_SDID_x030_1: 889 case IWN_SDID_x030_3: 890 case IWN_SDID_x030_5: 891 // iwl1030_bgn_cfg 892 case IWN_SDID_x030_2: 893 case IWN_SDID_x030_4: 894 case IWN_SDID_x030_6: 895 //iwl1030_bg_cfg 896 case IWN_SDID_x030_7: 897 case IWN_SDID_x030_10: 898 case IWN_SDID_x030_14: 899 //iwl6030_2agn_cfg 900 case IWN_SDID_x030_8: 901 case IWN_SDID_x030_11: 902 case IWN_SDID_x030_15: 903 // iwl6030_2bgn_cfg 904 case IWN_SDID_x030_9: 905 case IWN_SDID_x030_12: 906 case IWN_SDID_x030_16: 907 // iwl6030_2abg_cfg 908 case IWN_SDID_x030_13: 909 //iwl6030_2bg_cfg 910 sc->fwname = "iwn6000g2bfw"; 911 sc->limits = &iwn6000_sensitivity_limits; 912 sc->base_params = &iwn_6000g2b_base_params; 913 break; 914 default: 915 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 916 "0x%04x rev %d not supported (subdevice)\n", pid, 917 sc->subdevice_id,sc->hw_type); 918 return ENOTSUP; 919 } 920 break; 921 /* 130 Series WiFi */ 922 /* XXX: This series will need adjustment for rate. 923 * see rx_with_siso_diversity in linux kernel 924 */ 925 case IWN_DID_130_1: 926 case IWN_DID_130_2: 927 switch(sc->subdevice_id) { 928 case IWN_SDID_130_1: 929 case IWN_SDID_130_3: 930 case IWN_SDID_130_5: 931 //iwl130_bgn_cfg 932 case IWN_SDID_130_2: 933 case IWN_SDID_130_4: 934 case IWN_SDID_130_6: 935 //iwl130_bg_cfg 936 sc->fwname = "iwn6000g2bfw"; 937 sc->limits = &iwn6000_sensitivity_limits; 938 sc->base_params = &iwn_6000g2b_base_params; 939 break; 940 default: 941 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 942 "0x%04x rev %d not supported (subdevice)\n", pid, 943 sc->subdevice_id,sc->hw_type); 944 return ENOTSUP; 945 } 946 break; 947 /* 100 Series WiFi */ 948 case IWN_DID_100_1: 949 case IWN_DID_100_2: 950 switch(sc->subdevice_id) { 951 case IWN_SDID_100_1: 952 case IWN_SDID_100_2: 953 case IWN_SDID_100_3: 954 case IWN_SDID_100_4: 955 case IWN_SDID_100_5: 956 case IWN_SDID_100_6: 957 sc->limits = &iwn1000_sensitivity_limits; 958 sc->base_params = &iwn1000_base_params; 959 sc->fwname = "iwn100fw"; 960 break; 961 default: 962 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 963 "0x%04x rev %d not supported (subdevice)\n", pid, 964 sc->subdevice_id,sc->hw_type); 965 return ENOTSUP; 966 } 967 break; 968 969 /* 2x00 Series */ 970 case IWN_DID_2x00_1: 971 case IWN_DID_2x00_2: 972 switch(sc->subdevice_id) { 973 case IWN_SDID_2x00_1: 974 case IWN_SDID_2x00_2: 975 case IWN_SDID_2x00_3: 976 //iwl2000_2bgn_cfg 977 case IWN_SDID_2x00_4: 978 //iwl2000_2bgn_d_cfg 979 sc->limits = &iwn2030_sensitivity_limits; 980 sc->base_params = &iwn2000_base_params; 981 sc->fwname = "iwn2000fw"; 982 break; 983 default: 984 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 985 "0x%04x rev %d not supported (subdevice) \n", 986 pid, sc->subdevice_id, sc->hw_type); 987 return ENOTSUP; 988 } 989 break; 990 /* 2x30 Series */ 991 case IWN_DID_2x30_1: 992 case IWN_DID_2x30_2: 993 switch(sc->subdevice_id) { 994 case IWN_SDID_2x30_1: 995 case IWN_SDID_2x30_3: 996 case IWN_SDID_2x30_5: 997 //iwl100_bgn_cfg 998 case IWN_SDID_2x30_2: 999 case IWN_SDID_2x30_4: 1000 case IWN_SDID_2x30_6: 1001 //iwl100_bg_cfg 1002 sc->limits = &iwn2030_sensitivity_limits; 1003 sc->base_params = &iwn2030_base_params; 1004 sc->fwname = "iwn2030fw"; 1005 break; 1006 default: 1007 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1008 "0x%04x rev %d not supported (subdevice)\n", pid, 1009 sc->subdevice_id,sc->hw_type); 1010 return ENOTSUP; 1011 } 1012 break; 1013 /* 5x00 Series */ 1014 case IWN_DID_5x00_1: 1015 case IWN_DID_5x00_2: 1016 case IWN_DID_5x00_3: 1017 case IWN_DID_5x00_4: 1018 sc->limits = &iwn5000_sensitivity_limits; 1019 sc->base_params = &iwn5000_base_params; 1020 sc->fwname = "iwn5000fw"; 1021 switch(sc->subdevice_id) { 1022 case IWN_SDID_5x00_1: 1023 case IWN_SDID_5x00_2: 1024 case IWN_SDID_5x00_3: 1025 case IWN_SDID_5x00_4: 1026 case IWN_SDID_5x00_9: 1027 case IWN_SDID_5x00_10: 1028 case IWN_SDID_5x00_11: 1029 case IWN_SDID_5x00_12: 1030 case IWN_SDID_5x00_17: 1031 case IWN_SDID_5x00_18: 1032 case IWN_SDID_5x00_19: 1033 case IWN_SDID_5x00_20: 1034 //iwl5100_agn_cfg 1035 sc->txchainmask = IWN_ANT_B; 1036 sc->rxchainmask = IWN_ANT_AB; 1037 break; 1038 case IWN_SDID_5x00_5: 1039 case IWN_SDID_5x00_6: 1040 case IWN_SDID_5x00_13: 1041 case IWN_SDID_5x00_14: 1042 case IWN_SDID_5x00_21: 1043 case IWN_SDID_5x00_22: 1044 //iwl5100_bgn_cfg 1045 sc->txchainmask = IWN_ANT_B; 1046 sc->rxchainmask = IWN_ANT_AB; 1047 break; 1048 case IWN_SDID_5x00_7: 1049 case IWN_SDID_5x00_8: 1050 case IWN_SDID_5x00_15: 1051 case IWN_SDID_5x00_16: 1052 case IWN_SDID_5x00_23: 1053 case IWN_SDID_5x00_24: 1054 //iwl5100_abg_cfg 1055 sc->txchainmask = IWN_ANT_B; 1056 sc->rxchainmask = IWN_ANT_AB; 1057 break; 1058 case IWN_SDID_5x00_25: 1059 case IWN_SDID_5x00_26: 1060 case IWN_SDID_5x00_27: 1061 case IWN_SDID_5x00_28: 1062 case IWN_SDID_5x00_29: 1063 case IWN_SDID_5x00_30: 1064 case IWN_SDID_5x00_31: 1065 case IWN_SDID_5x00_32: 1066 case IWN_SDID_5x00_33: 1067 case IWN_SDID_5x00_34: 1068 case IWN_SDID_5x00_35: 1069 case IWN_SDID_5x00_36: 1070 //iwl5300_agn_cfg 1071 sc->txchainmask = IWN_ANT_ABC; 1072 sc->rxchainmask = IWN_ANT_ABC; 1073 break; 1074 default: 1075 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1076 "0x%04x rev %d not supported (subdevice)\n", pid, 1077 sc->subdevice_id,sc->hw_type); 1078 return ENOTSUP; 1079 } 1080 break; 1081 /* 5x50 Series */ 1082 case IWN_DID_5x50_1: 1083 case IWN_DID_5x50_2: 1084 case IWN_DID_5x50_3: 1085 case IWN_DID_5x50_4: 1086 sc->limits = &iwn5000_sensitivity_limits; 1087 sc->base_params = &iwn5000_base_params; 1088 sc->fwname = "iwn5000fw"; 1089 switch(sc->subdevice_id) { 1090 case IWN_SDID_5x50_1: 1091 case IWN_SDID_5x50_2: 1092 case IWN_SDID_5x50_3: 1093 //iwl5350_agn_cfg 1094 sc->limits = &iwn5000_sensitivity_limits; 1095 sc->base_params = &iwn5000_base_params; 1096 sc->fwname = "iwn5000fw"; 1097 break; 1098 case IWN_SDID_5x50_4: 1099 case IWN_SDID_5x50_5: 1100 case IWN_SDID_5x50_8: 1101 case IWN_SDID_5x50_9: 1102 case IWN_SDID_5x50_10: 1103 case IWN_SDID_5x50_11: 1104 //iwl5150_agn_cfg 1105 case IWN_SDID_5x50_6: 1106 case IWN_SDID_5x50_7: 1107 case IWN_SDID_5x50_12: 1108 case IWN_SDID_5x50_13: 1109 //iwl5150_abg_cfg 1110 sc->limits = &iwn5000_sensitivity_limits; 1111 sc->fwname = "iwn5150fw"; 1112 sc->base_params = &iwn_5x50_base_params; 1113 break; 1114 default: 1115 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1116 "0x%04x rev %d not supported (subdevice)\n", pid, 1117 sc->subdevice_id,sc->hw_type); 1118 return ENOTSUP; 1119 } 1120 break; 1121 default: 1122 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" 1123 "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, 1124 sc->hw_type); 1125 return ENOTSUP; 1126 } 1127 return 0; 1128 } 1129 1130 static int 1131 iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 1132 { 1133 struct iwn_ops *ops = &sc->ops; 1134 1135 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1136 ops->load_firmware = iwn4965_load_firmware; 1137 ops->read_eeprom = iwn4965_read_eeprom; 1138 ops->post_alive = iwn4965_post_alive; 1139 ops->nic_config = iwn4965_nic_config; 1140 ops->update_sched = iwn4965_update_sched; 1141 ops->get_temperature = iwn4965_get_temperature; 1142 ops->get_rssi = iwn4965_get_rssi; 1143 ops->set_txpower = iwn4965_set_txpower; 1144 ops->init_gains = iwn4965_init_gains; 1145 ops->set_gains = iwn4965_set_gains; 1146 ops->add_node = iwn4965_add_node; 1147 ops->tx_done = iwn4965_tx_done; 1148 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 1149 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 1150 sc->ntxqs = IWN4965_NTXQUEUES; 1151 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; 1152 sc->ndmachnls = IWN4965_NDMACHNLS; 1153 sc->broadcast_id = IWN4965_ID_BROADCAST; 1154 sc->rxonsz = IWN4965_RXONSZ; 1155 sc->schedsz = IWN4965_SCHEDSZ; 1156 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 1157 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 1158 sc->fwsz = IWN4965_FWSZ; 1159 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 1160 sc->limits = &iwn4965_sensitivity_limits; 1161 sc->fwname = "iwn4965fw"; 1162 /* Override chains masks, ROM is known to be broken. */ 1163 sc->txchainmask = IWN_ANT_AB; 1164 sc->rxchainmask = IWN_ANT_ABC; 1165 /* Enable normal btcoex */ 1166 sc->sc_flags |= IWN_FLAG_BTCOEX; 1167 1168 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); 1169 1170 return 0; 1171 } 1172 1173 static int 1174 iwn5000_attach(struct iwn_softc *sc, uint16_t pid) 1175 { 1176 struct iwn_ops *ops = &sc->ops; 1177 1178 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1179 1180 ops->load_firmware = iwn5000_load_firmware; 1181 ops->read_eeprom = iwn5000_read_eeprom; 1182 ops->post_alive = iwn5000_post_alive; 1183 ops->nic_config = iwn5000_nic_config; 1184 ops->update_sched = iwn5000_update_sched; 1185 ops->get_temperature = iwn5000_get_temperature; 1186 ops->get_rssi = iwn5000_get_rssi; 1187 ops->set_txpower = iwn5000_set_txpower; 1188 ops->init_gains = iwn5000_init_gains; 1189 ops->set_gains = iwn5000_set_gains; 1190 ops->add_node = iwn5000_add_node; 1191 ops->tx_done = iwn5000_tx_done; 1192 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 1193 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 1194 sc->ntxqs = IWN5000_NTXQUEUES; 1195 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; 1196 sc->ndmachnls = IWN5000_NDMACHNLS; 1197 sc->broadcast_id = IWN5000_ID_BROADCAST; 1198 sc->rxonsz = IWN5000_RXONSZ; 1199 sc->schedsz = IWN5000_SCHEDSZ; 1200 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 1201 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 1202 sc->fwsz = IWN5000_FWSZ; 1203 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 1204 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 1205 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 1206 1207 return 0; 1208 } 1209 1210 /* 1211 * Attach the interface to 802.11 radiotap. 1212 */ 1213 static void 1214 iwn_radiotap_attach(struct iwn_softc *sc) 1215 { 1216 struct ifnet *ifp = sc->sc_ifp; 1217 struct ieee80211com *ic = ifp->if_l2com; 1218 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1219 ieee80211_radiotap_attach(ic, 1220 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 1221 IWN_TX_RADIOTAP_PRESENT, 1222 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 1223 IWN_RX_RADIOTAP_PRESENT); 1224 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1225 } 1226 1227 static void 1228 iwn_sysctlattach(struct iwn_softc *sc) 1229 { 1230 #ifdef IWN_DEBUG 1231 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 1232 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 1233 1234 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1235 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 1236 "control debugging printfs"); 1237 #endif 1238 } 1239 1240 static struct ieee80211vap * 1241 iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1242 enum ieee80211_opmode opmode, int flags, 1243 const uint8_t bssid[IEEE80211_ADDR_LEN], 1244 const uint8_t mac[IEEE80211_ADDR_LEN]) 1245 { 1246 struct iwn_vap *ivp; 1247 struct ieee80211vap *vap; 1248 uint8_t mac1[IEEE80211_ADDR_LEN]; 1249 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1250 1251 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 1252 return NULL; 1253 1254 IEEE80211_ADDR_COPY(mac1, mac); 1255 1256 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), 1257 M_80211_VAP, M_NOWAIT | M_ZERO); 1258 if (ivp == NULL) 1259 return NULL; 1260 vap = &ivp->iv_vap; 1261 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1); 1262 ivp->ctx = IWN_RXON_BSS_CTX; 1263 IEEE80211_ADDR_COPY(ivp->macaddr, mac1); 1264 vap->iv_bmissthreshold = 10; /* override default */ 1265 /* Override with driver methods. */ 1266 ivp->iv_newstate = vap->iv_newstate; 1267 vap->iv_newstate = iwn_newstate; 1268 sc->ivap[IWN_RXON_BSS_CTX] = vap; 1269 1270 ieee80211_ratectl_init(vap); 1271 /* Complete setup. */ 1272 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); 1273 ic->ic_opmode = opmode; 1274 return vap; 1275 } 1276 1277 static void 1278 iwn_vap_delete(struct ieee80211vap *vap) 1279 { 1280 struct iwn_vap *ivp = IWN_VAP(vap); 1281 1282 ieee80211_ratectl_deinit(vap); 1283 ieee80211_vap_detach(vap); 1284 free(ivp, M_80211_VAP); 1285 } 1286 1287 static int 1288 iwn_detach(device_t dev) 1289 { 1290 struct iwn_softc *sc = device_get_softc(dev); 1291 struct ifnet *ifp = sc->sc_ifp; 1292 struct ieee80211com *ic; 1293 int qid; 1294 1295 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1296 1297 if (ifp != NULL) { 1298 ic = ifp->if_l2com; 1299 1300 ieee80211_draintask(ic, &sc->sc_reinit_task); 1301 ieee80211_draintask(ic, &sc->sc_radioon_task); 1302 ieee80211_draintask(ic, &sc->sc_radiooff_task); 1303 1304 iwn_stop(sc); 1305 callout_drain(&sc->watchdog_to); 1306 callout_drain(&sc->calib_to); 1307 ieee80211_ifdetach(ic); 1308 } 1309 1310 /* Uninstall interrupt handler. */ 1311 if (sc->irq != NULL) { 1312 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 1313 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 1314 sc->irq); 1315 pci_release_msi(dev); 1316 } 1317 1318 /* Free DMA resources. */ 1319 iwn_free_rx_ring(sc, &sc->rxq); 1320 for (qid = 0; qid < sc->ntxqs; qid++) 1321 iwn_free_tx_ring(sc, &sc->txq[qid]); 1322 iwn_free_sched(sc); 1323 iwn_free_kw(sc); 1324 if (sc->ict != NULL) 1325 iwn_free_ict(sc); 1326 iwn_free_fwmem(sc); 1327 1328 if (sc->mem != NULL) 1329 bus_release_resource(dev, SYS_RES_MEMORY, 1330 rman_get_rid(sc->mem), sc->mem); 1331 1332 if (ifp != NULL) 1333 if_free(ifp); 1334 1335 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__); 1336 IWN_LOCK_DESTROY(sc); 1337 return 0; 1338 } 1339 1340 static int 1341 iwn_shutdown(device_t dev) 1342 { 1343 struct iwn_softc *sc = device_get_softc(dev); 1344 1345 iwn_stop(sc); 1346 return 0; 1347 } 1348 1349 static int 1350 iwn_suspend(device_t dev) 1351 { 1352 struct iwn_softc *sc = device_get_softc(dev); 1353 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1354 1355 ieee80211_suspend_all(ic); 1356 return 0; 1357 } 1358 1359 static int 1360 iwn_resume(device_t dev) 1361 { 1362 struct iwn_softc *sc = device_get_softc(dev); 1363 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1364 1365 /* Clear device-specific "PCI retry timeout" register (41h). */ 1366 pci_write_config(dev, 0x41, 0, 1); 1367 1368 ieee80211_resume_all(ic); 1369 return 0; 1370 } 1371 1372 static int 1373 iwn_nic_lock(struct iwn_softc *sc) 1374 { 1375 int ntries; 1376 1377 /* Request exclusive access to NIC. */ 1378 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1379 1380 /* Spin until we actually get the lock. */ 1381 for (ntries = 0; ntries < 1000; ntries++) { 1382 if ((IWN_READ(sc, IWN_GP_CNTRL) & 1383 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 1384 IWN_GP_CNTRL_MAC_ACCESS_ENA) 1385 return 0; 1386 DELAY(10); 1387 } 1388 return ETIMEDOUT; 1389 } 1390 1391 static __inline void 1392 iwn_nic_unlock(struct iwn_softc *sc) 1393 { 1394 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1395 } 1396 1397 static __inline uint32_t 1398 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 1399 { 1400 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 1401 IWN_BARRIER_READ_WRITE(sc); 1402 return IWN_READ(sc, IWN_PRPH_RDATA); 1403 } 1404 1405 static __inline void 1406 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1407 { 1408 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 1409 IWN_BARRIER_WRITE(sc); 1410 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 1411 } 1412 1413 static __inline void 1414 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1415 { 1416 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 1417 } 1418 1419 static __inline void 1420 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1421 { 1422 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 1423 } 1424 1425 static __inline void 1426 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 1427 const uint32_t *data, int count) 1428 { 1429 for (; count > 0; count--, data++, addr += 4) 1430 iwn_prph_write(sc, addr, *data); 1431 } 1432 1433 static __inline uint32_t 1434 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 1435 { 1436 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 1437 IWN_BARRIER_READ_WRITE(sc); 1438 return IWN_READ(sc, IWN_MEM_RDATA); 1439 } 1440 1441 static __inline void 1442 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1443 { 1444 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 1445 IWN_BARRIER_WRITE(sc); 1446 IWN_WRITE(sc, IWN_MEM_WDATA, data); 1447 } 1448 1449 static __inline void 1450 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) 1451 { 1452 uint32_t tmp; 1453 1454 tmp = iwn_mem_read(sc, addr & ~3); 1455 if (addr & 3) 1456 tmp = (tmp & 0x0000ffff) | data << 16; 1457 else 1458 tmp = (tmp & 0xffff0000) | data; 1459 iwn_mem_write(sc, addr & ~3, tmp); 1460 } 1461 1462 static __inline void 1463 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 1464 int count) 1465 { 1466 for (; count > 0; count--, addr += 4) 1467 *data++ = iwn_mem_read(sc, addr); 1468 } 1469 1470 static __inline void 1471 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1472 int count) 1473 { 1474 for (; count > 0; count--, addr += 4) 1475 iwn_mem_write(sc, addr, val); 1476 } 1477 1478 static int 1479 iwn_eeprom_lock(struct iwn_softc *sc) 1480 { 1481 int i, ntries; 1482 1483 for (i = 0; i < 100; i++) { 1484 /* Request exclusive access to EEPROM. */ 1485 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1486 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1487 1488 /* Spin until we actually get the lock. */ 1489 for (ntries = 0; ntries < 100; ntries++) { 1490 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1491 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1492 return 0; 1493 DELAY(10); 1494 } 1495 } 1496 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__); 1497 return ETIMEDOUT; 1498 } 1499 1500 static __inline void 1501 iwn_eeprom_unlock(struct iwn_softc *sc) 1502 { 1503 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1504 } 1505 1506 /* 1507 * Initialize access by host to One Time Programmable ROM. 1508 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1509 */ 1510 static int 1511 iwn_init_otprom(struct iwn_softc *sc) 1512 { 1513 uint16_t prev, base, next; 1514 int count, error; 1515 1516 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1517 1518 /* Wait for clock stabilization before accessing prph. */ 1519 if ((error = iwn_clock_wait(sc)) != 0) 1520 return error; 1521 1522 if ((error = iwn_nic_lock(sc)) != 0) 1523 return error; 1524 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1525 DELAY(5); 1526 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1527 iwn_nic_unlock(sc); 1528 1529 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1530 if (sc->base_params->shadow_ram_support) { 1531 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1532 IWN_RESET_LINK_PWR_MGMT_DIS); 1533 } 1534 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1535 /* Clear ECC status. */ 1536 IWN_SETBITS(sc, IWN_OTP_GP, 1537 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1538 1539 /* 1540 * Find the block before last block (contains the EEPROM image) 1541 * for HW without OTP shadow RAM. 1542 */ 1543 if (! sc->base_params->shadow_ram_support) { 1544 /* Switch to absolute addressing mode. */ 1545 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1546 base = prev = 0; 1547 for (count = 0; count < sc->base_params->max_ll_items; 1548 count++) { 1549 error = iwn_read_prom_data(sc, base, &next, 2); 1550 if (error != 0) 1551 return error; 1552 if (next == 0) /* End of linked-list. */ 1553 break; 1554 prev = base; 1555 base = le16toh(next); 1556 } 1557 if (count == 0 || count == sc->base_params->max_ll_items) 1558 return EIO; 1559 /* Skip "next" word. */ 1560 sc->prom_base = prev + 1; 1561 } 1562 1563 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1564 1565 return 0; 1566 } 1567 1568 static int 1569 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1570 { 1571 uint8_t *out = data; 1572 uint32_t val, tmp; 1573 int ntries; 1574 1575 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1576 1577 addr += sc->prom_base; 1578 for (; count > 0; count -= 2, addr++) { 1579 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1580 for (ntries = 0; ntries < 10; ntries++) { 1581 val = IWN_READ(sc, IWN_EEPROM); 1582 if (val & IWN_EEPROM_READ_VALID) 1583 break; 1584 DELAY(5); 1585 } 1586 if (ntries == 10) { 1587 device_printf(sc->sc_dev, 1588 "timeout reading ROM at 0x%x\n", addr); 1589 return ETIMEDOUT; 1590 } 1591 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1592 /* OTPROM, check for ECC errors. */ 1593 tmp = IWN_READ(sc, IWN_OTP_GP); 1594 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1595 device_printf(sc->sc_dev, 1596 "OTPROM ECC error at 0x%x\n", addr); 1597 return EIO; 1598 } 1599 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1600 /* Correctable ECC error, clear bit. */ 1601 IWN_SETBITS(sc, IWN_OTP_GP, 1602 IWN_OTP_GP_ECC_CORR_STTS); 1603 } 1604 } 1605 *out++ = val >> 16; 1606 if (count > 1) 1607 *out++ = val >> 24; 1608 } 1609 1610 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1611 1612 return 0; 1613 } 1614 1615 static void 1616 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1617 { 1618 if (error != 0) 1619 return; 1620 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1621 *(bus_addr_t *)arg = segs[0].ds_addr; 1622 } 1623 1624 static int 1625 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1626 void **kvap, bus_size_t size, bus_size_t alignment) 1627 { 1628 int error; 1629 1630 dma->tag = NULL; 1631 dma->size = size; 1632 1633 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1634 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1635 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 1636 if (error != 0) 1637 goto fail; 1638 1639 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1640 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 1641 if (error != 0) 1642 goto fail; 1643 1644 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 1645 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 1646 if (error != 0) 1647 goto fail; 1648 1649 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 1650 1651 if (kvap != NULL) 1652 *kvap = dma->vaddr; 1653 1654 return 0; 1655 1656 fail: iwn_dma_contig_free(dma); 1657 return error; 1658 } 1659 1660 static void 1661 iwn_dma_contig_free(struct iwn_dma_info *dma) 1662 { 1663 if (dma->map != NULL) { 1664 if (dma->vaddr != NULL) { 1665 bus_dmamap_sync(dma->tag, dma->map, 1666 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1667 bus_dmamap_unload(dma->tag, dma->map); 1668 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 1669 dma->vaddr = NULL; 1670 } 1671 bus_dmamap_destroy(dma->tag, dma->map); 1672 dma->map = NULL; 1673 } 1674 if (dma->tag != NULL) { 1675 bus_dma_tag_destroy(dma->tag); 1676 dma->tag = NULL; 1677 } 1678 } 1679 1680 static int 1681 iwn_alloc_sched(struct iwn_softc *sc) 1682 { 1683 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1684 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, 1685 sc->schedsz, 1024); 1686 } 1687 1688 static void 1689 iwn_free_sched(struct iwn_softc *sc) 1690 { 1691 iwn_dma_contig_free(&sc->sched_dma); 1692 } 1693 1694 static int 1695 iwn_alloc_kw(struct iwn_softc *sc) 1696 { 1697 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1698 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); 1699 } 1700 1701 static void 1702 iwn_free_kw(struct iwn_softc *sc) 1703 { 1704 iwn_dma_contig_free(&sc->kw_dma); 1705 } 1706 1707 static int 1708 iwn_alloc_ict(struct iwn_softc *sc) 1709 { 1710 /* ICT table must be aligned on a 4KB boundary. */ 1711 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, 1712 IWN_ICT_SIZE, 4096); 1713 } 1714 1715 static void 1716 iwn_free_ict(struct iwn_softc *sc) 1717 { 1718 iwn_dma_contig_free(&sc->ict_dma); 1719 } 1720 1721 static int 1722 iwn_alloc_fwmem(struct iwn_softc *sc) 1723 { 1724 /* Must be aligned on a 16-byte boundary. */ 1725 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); 1726 } 1727 1728 static void 1729 iwn_free_fwmem(struct iwn_softc *sc) 1730 { 1731 iwn_dma_contig_free(&sc->fw_dma); 1732 } 1733 1734 static int 1735 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1736 { 1737 bus_size_t size; 1738 int i, error; 1739 1740 ring->cur = 0; 1741 1742 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1743 1744 /* Allocate RX descriptors (256-byte aligned). */ 1745 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1746 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1747 size, 256); 1748 if (error != 0) { 1749 device_printf(sc->sc_dev, 1750 "%s: could not allocate RX ring DMA memory, error %d\n", 1751 __func__, error); 1752 goto fail; 1753 } 1754 1755 /* Allocate RX status area (16-byte aligned). */ 1756 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1757 sizeof (struct iwn_rx_status), 16); 1758 if (error != 0) { 1759 device_printf(sc->sc_dev, 1760 "%s: could not allocate RX status DMA memory, error %d\n", 1761 __func__, error); 1762 goto fail; 1763 } 1764 1765 /* Create RX buffer DMA tag. */ 1766 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1767 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1768 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL, 1769 &ring->data_dmat); 1770 if (error != 0) { 1771 device_printf(sc->sc_dev, 1772 "%s: could not create RX buf DMA tag, error %d\n", 1773 __func__, error); 1774 goto fail; 1775 } 1776 1777 /* 1778 * Allocate and map RX buffers. 1779 */ 1780 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1781 struct iwn_rx_data *data = &ring->data[i]; 1782 bus_addr_t paddr; 1783 1784 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1785 if (error != 0) { 1786 device_printf(sc->sc_dev, 1787 "%s: could not create RX buf DMA map, error %d\n", 1788 __func__, error); 1789 goto fail; 1790 } 1791 1792 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1793 IWN_RBUF_SIZE); 1794 if (data->m == NULL) { 1795 device_printf(sc->sc_dev, 1796 "%s: could not allocate RX mbuf\n", __func__); 1797 error = ENOBUFS; 1798 goto fail; 1799 } 1800 1801 error = bus_dmamap_load(ring->data_dmat, data->map, 1802 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1803 &paddr, BUS_DMA_NOWAIT); 1804 if (error != 0 && error != EFBIG) { 1805 device_printf(sc->sc_dev, 1806 "%s: can't not map mbuf, error %d\n", __func__, 1807 error); 1808 goto fail; 1809 } 1810 1811 /* Set physical address of RX buffer (256-byte aligned). */ 1812 ring->desc[i] = htole32(paddr >> 8); 1813 } 1814 1815 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1816 BUS_DMASYNC_PREWRITE); 1817 1818 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1819 1820 return 0; 1821 1822 fail: iwn_free_rx_ring(sc, ring); 1823 1824 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1825 1826 return error; 1827 } 1828 1829 static void 1830 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1831 { 1832 int ntries; 1833 1834 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1835 1836 if (iwn_nic_lock(sc) == 0) { 1837 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1838 for (ntries = 0; ntries < 1000; ntries++) { 1839 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1840 IWN_FH_RX_STATUS_IDLE) 1841 break; 1842 DELAY(10); 1843 } 1844 iwn_nic_unlock(sc); 1845 } 1846 ring->cur = 0; 1847 sc->last_rx_valid = 0; 1848 } 1849 1850 static void 1851 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1852 { 1853 int i; 1854 1855 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1856 1857 iwn_dma_contig_free(&ring->desc_dma); 1858 iwn_dma_contig_free(&ring->stat_dma); 1859 1860 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1861 struct iwn_rx_data *data = &ring->data[i]; 1862 1863 if (data->m != NULL) { 1864 bus_dmamap_sync(ring->data_dmat, data->map, 1865 BUS_DMASYNC_POSTREAD); 1866 bus_dmamap_unload(ring->data_dmat, data->map); 1867 m_freem(data->m); 1868 data->m = NULL; 1869 } 1870 if (data->map != NULL) 1871 bus_dmamap_destroy(ring->data_dmat, data->map); 1872 } 1873 if (ring->data_dmat != NULL) { 1874 bus_dma_tag_destroy(ring->data_dmat); 1875 ring->data_dmat = NULL; 1876 } 1877 } 1878 1879 static int 1880 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1881 { 1882 bus_addr_t paddr; 1883 bus_size_t size; 1884 int i, error; 1885 1886 ring->qid = qid; 1887 ring->queued = 0; 1888 ring->cur = 0; 1889 1890 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1891 1892 /* Allocate TX descriptors (256-byte aligned). */ 1893 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1894 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1895 size, 256); 1896 if (error != 0) { 1897 device_printf(sc->sc_dev, 1898 "%s: could not allocate TX ring DMA memory, error %d\n", 1899 __func__, error); 1900 goto fail; 1901 } 1902 1903 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1904 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1905 size, 4); 1906 if (error != 0) { 1907 device_printf(sc->sc_dev, 1908 "%s: could not allocate TX cmd DMA memory, error %d\n", 1909 __func__, error); 1910 goto fail; 1911 } 1912 1913 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1914 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1915 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1916 &ring->data_dmat); 1917 if (error != 0) { 1918 device_printf(sc->sc_dev, 1919 "%s: could not create TX buf DMA tag, error %d\n", 1920 __func__, error); 1921 goto fail; 1922 } 1923 1924 paddr = ring->cmd_dma.paddr; 1925 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1926 struct iwn_tx_data *data = &ring->data[i]; 1927 1928 data->cmd_paddr = paddr; 1929 data->scratch_paddr = paddr + 12; 1930 paddr += sizeof (struct iwn_tx_cmd); 1931 1932 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1933 if (error != 0) { 1934 device_printf(sc->sc_dev, 1935 "%s: could not create TX buf DMA map, error %d\n", 1936 __func__, error); 1937 goto fail; 1938 } 1939 } 1940 1941 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1942 1943 return 0; 1944 1945 fail: iwn_free_tx_ring(sc, ring); 1946 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 1947 return error; 1948 } 1949 1950 static void 1951 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1952 { 1953 int i; 1954 1955 DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__); 1956 1957 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1958 struct iwn_tx_data *data = &ring->data[i]; 1959 1960 if (data->m != NULL) { 1961 bus_dmamap_sync(ring->data_dmat, data->map, 1962 BUS_DMASYNC_POSTWRITE); 1963 bus_dmamap_unload(ring->data_dmat, data->map); 1964 m_freem(data->m); 1965 data->m = NULL; 1966 } 1967 } 1968 /* Clear TX descriptors. */ 1969 memset(ring->desc, 0, ring->desc_dma.size); 1970 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1971 BUS_DMASYNC_PREWRITE); 1972 sc->qfullmsk &= ~(1 << ring->qid); 1973 ring->queued = 0; 1974 ring->cur = 0; 1975 } 1976 1977 static void 1978 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1979 { 1980 int i; 1981 1982 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1983 1984 iwn_dma_contig_free(&ring->desc_dma); 1985 iwn_dma_contig_free(&ring->cmd_dma); 1986 1987 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1988 struct iwn_tx_data *data = &ring->data[i]; 1989 1990 if (data->m != NULL) { 1991 bus_dmamap_sync(ring->data_dmat, data->map, 1992 BUS_DMASYNC_POSTWRITE); 1993 bus_dmamap_unload(ring->data_dmat, data->map); 1994 m_freem(data->m); 1995 } 1996 if (data->map != NULL) 1997 bus_dmamap_destroy(ring->data_dmat, data->map); 1998 } 1999 if (ring->data_dmat != NULL) { 2000 bus_dma_tag_destroy(ring->data_dmat); 2001 ring->data_dmat = NULL; 2002 } 2003 } 2004 2005 static void 2006 iwn5000_ict_reset(struct iwn_softc *sc) 2007 { 2008 /* Disable interrupts. */ 2009 IWN_WRITE(sc, IWN_INT_MASK, 0); 2010 2011 /* Reset ICT table. */ 2012 memset(sc->ict, 0, IWN_ICT_SIZE); 2013 sc->ict_cur = 0; 2014 2015 /* Set physical address of ICT table (4KB aligned). */ 2016 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); 2017 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 2018 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 2019 2020 /* Enable periodic RX interrupt. */ 2021 sc->int_mask |= IWN_INT_RX_PERIODIC; 2022 /* Switch to ICT interrupt mode in driver. */ 2023 sc->sc_flags |= IWN_FLAG_USE_ICT; 2024 2025 /* Re-enable interrupts. */ 2026 IWN_WRITE(sc, IWN_INT, 0xffffffff); 2027 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 2028 } 2029 2030 static int 2031 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 2032 { 2033 struct iwn_ops *ops = &sc->ops; 2034 uint16_t val; 2035 int error; 2036 2037 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2038 2039 /* Check whether adapter has an EEPROM or an OTPROM. */ 2040 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 2041 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 2042 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 2043 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", 2044 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); 2045 2046 /* Adapter has to be powered on for EEPROM access to work. */ 2047 if ((error = iwn_apm_init(sc)) != 0) { 2048 device_printf(sc->sc_dev, 2049 "%s: could not power ON adapter, error %d\n", __func__, 2050 error); 2051 return error; 2052 } 2053 2054 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 2055 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); 2056 return EIO; 2057 } 2058 if ((error = iwn_eeprom_lock(sc)) != 0) { 2059 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n", 2060 __func__, error); 2061 return error; 2062 } 2063 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 2064 if ((error = iwn_init_otprom(sc)) != 0) { 2065 device_printf(sc->sc_dev, 2066 "%s: could not initialize OTPROM, error %d\n", 2067 __func__, error); 2068 return error; 2069 } 2070 } 2071 2072 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); 2073 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val)); 2074 /* Check if HT support is bonded out. */ 2075 if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) 2076 sc->sc_flags |= IWN_FLAG_HAS_11N; 2077 2078 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 2079 sc->rfcfg = le16toh(val); 2080 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); 2081 /* Read Tx/Rx chains from ROM unless it's known to be broken. */ 2082 if (sc->txchainmask == 0) 2083 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); 2084 if (sc->rxchainmask == 0) 2085 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); 2086 2087 /* Read MAC address. */ 2088 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); 2089 2090 /* Read adapter-specific information from EEPROM. */ 2091 ops->read_eeprom(sc); 2092 2093 iwn_apm_stop(sc); /* Power OFF adapter. */ 2094 2095 iwn_eeprom_unlock(sc); 2096 2097 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2098 2099 return 0; 2100 } 2101 2102 static void 2103 iwn4965_read_eeprom(struct iwn_softc *sc) 2104 { 2105 uint32_t addr; 2106 uint16_t val; 2107 int i; 2108 2109 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2110 2111 /* Read regulatory domain (4 ASCII characters). */ 2112 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 2113 2114 /* Read the list of authorized channels (20MHz ones only). */ 2115 for (i = 0; i < IWN_NBANDS - 1; i++) { 2116 addr = iwn4965_regulatory_bands[i]; 2117 iwn_read_eeprom_channels(sc, i, addr); 2118 } 2119 2120 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 2121 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 2122 sc->maxpwr2GHz = val & 0xff; 2123 sc->maxpwr5GHz = val >> 8; 2124 /* Check that EEPROM values are within valid range. */ 2125 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 2126 sc->maxpwr5GHz = 38; 2127 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 2128 sc->maxpwr2GHz = 38; 2129 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", 2130 sc->maxpwr2GHz, sc->maxpwr5GHz); 2131 2132 /* Read samples for each TX power group. */ 2133 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 2134 sizeof sc->bands); 2135 2136 /* Read voltage at which samples were taken. */ 2137 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 2138 sc->eeprom_voltage = (int16_t)le16toh(val); 2139 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", 2140 sc->eeprom_voltage); 2141 2142 #ifdef IWN_DEBUG 2143 /* Print samples. */ 2144 if (sc->sc_debug & IWN_DEBUG_ANY) { 2145 for (i = 0; i < IWN_NBANDS - 1; i++) 2146 iwn4965_print_power_group(sc, i); 2147 } 2148 #endif 2149 2150 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2151 } 2152 2153 #ifdef IWN_DEBUG 2154 static void 2155 iwn4965_print_power_group(struct iwn_softc *sc, int i) 2156 { 2157 struct iwn4965_eeprom_band *band = &sc->bands[i]; 2158 struct iwn4965_eeprom_chan_samples *chans = band->chans; 2159 int j, c; 2160 2161 printf("===band %d===\n", i); 2162 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 2163 printf("chan1 num=%d\n", chans[0].num); 2164 for (c = 0; c < 2; c++) { 2165 for (j = 0; j < IWN_NSAMPLES; j++) { 2166 printf("chain %d, sample %d: temp=%d gain=%d " 2167 "power=%d pa_det=%d\n", c, j, 2168 chans[0].samples[c][j].temp, 2169 chans[0].samples[c][j].gain, 2170 chans[0].samples[c][j].power, 2171 chans[0].samples[c][j].pa_det); 2172 } 2173 } 2174 printf("chan2 num=%d\n", chans[1].num); 2175 for (c = 0; c < 2; c++) { 2176 for (j = 0; j < IWN_NSAMPLES; j++) { 2177 printf("chain %d, sample %d: temp=%d gain=%d " 2178 "power=%d pa_det=%d\n", c, j, 2179 chans[1].samples[c][j].temp, 2180 chans[1].samples[c][j].gain, 2181 chans[1].samples[c][j].power, 2182 chans[1].samples[c][j].pa_det); 2183 } 2184 } 2185 } 2186 #endif 2187 2188 static void 2189 iwn5000_read_eeprom(struct iwn_softc *sc) 2190 { 2191 struct iwn5000_eeprom_calib_hdr hdr; 2192 int32_t volt; 2193 uint32_t base, addr; 2194 uint16_t val; 2195 int i; 2196 2197 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2198 2199 /* Read regulatory domain (4 ASCII characters). */ 2200 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 2201 base = le16toh(val); 2202 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 2203 sc->eeprom_domain, 4); 2204 2205 /* Read the list of authorized channels (20MHz ones only). */ 2206 for (i = 0; i < IWN_NBANDS - 1; i++) { 2207 addr = base + sc->base_params->regulatory_bands[i]; 2208 iwn_read_eeprom_channels(sc, i, addr); 2209 } 2210 2211 /* Read enhanced TX power information for 6000 Series. */ 2212 if (sc->base_params->enhanced_TX_power) 2213 iwn_read_eeprom_enhinfo(sc); 2214 2215 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 2216 base = le16toh(val); 2217 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 2218 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 2219 "%s: calib version=%u pa type=%u voltage=%u\n", __func__, 2220 hdr.version, hdr.pa_type, le16toh(hdr.volt)); 2221 sc->calib_ver = hdr.version; 2222 2223 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { 2224 sc->eeprom_voltage = le16toh(hdr.volt); 2225 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 2226 sc->eeprom_temp_high=le16toh(val); 2227 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 2228 sc->eeprom_temp = le16toh(val); 2229 } 2230 2231 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 2232 /* Compute temperature offset. */ 2233 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 2234 sc->eeprom_temp = le16toh(val); 2235 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 2236 volt = le16toh(val); 2237 sc->temp_off = sc->eeprom_temp - (volt / -5); 2238 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", 2239 sc->eeprom_temp, volt, sc->temp_off); 2240 } else { 2241 /* Read crystal calibration. */ 2242 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 2243 &sc->eeprom_crystal, sizeof (uint32_t)); 2244 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n", 2245 le32toh(sc->eeprom_crystal)); 2246 } 2247 2248 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2249 2250 } 2251 2252 /* 2253 * Translate EEPROM flags to net80211. 2254 */ 2255 static uint32_t 2256 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) 2257 { 2258 uint32_t nflags; 2259 2260 nflags = 0; 2261 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 2262 nflags |= IEEE80211_CHAN_PASSIVE; 2263 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) 2264 nflags |= IEEE80211_CHAN_NOADHOC; 2265 if (channel->flags & IWN_EEPROM_CHAN_RADAR) { 2266 nflags |= IEEE80211_CHAN_DFS; 2267 /* XXX apparently IBSS may still be marked */ 2268 nflags |= IEEE80211_CHAN_NOADHOC; 2269 } 2270 2271 return nflags; 2272 } 2273 2274 static void 2275 iwn_read_eeprom_band(struct iwn_softc *sc, int n) 2276 { 2277 struct ifnet *ifp = sc->sc_ifp; 2278 struct ieee80211com *ic = ifp->if_l2com; 2279 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 2280 const struct iwn_chan_band *band = &iwn_bands[n]; 2281 struct ieee80211_channel *c; 2282 uint8_t chan; 2283 int i, nflags; 2284 2285 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2286 2287 for (i = 0; i < band->nchan; i++) { 2288 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 2289 DPRINTF(sc, IWN_DEBUG_RESET, 2290 "skip chan %d flags 0x%x maxpwr %d\n", 2291 band->chan[i], channels[i].flags, 2292 channels[i].maxpwr); 2293 continue; 2294 } 2295 chan = band->chan[i]; 2296 nflags = iwn_eeprom_channel_flags(&channels[i]); 2297 2298 c = &ic->ic_channels[ic->ic_nchans++]; 2299 c->ic_ieee = chan; 2300 c->ic_maxregpower = channels[i].maxpwr; 2301 c->ic_maxpower = 2*c->ic_maxregpower; 2302 2303 if (n == 0) { /* 2GHz band */ 2304 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G); 2305 /* G =>'s B is supported */ 2306 c->ic_flags = IEEE80211_CHAN_B | nflags; 2307 c = &ic->ic_channels[ic->ic_nchans++]; 2308 c[0] = c[-1]; 2309 c->ic_flags = IEEE80211_CHAN_G | nflags; 2310 } else { /* 5GHz band */ 2311 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A); 2312 c->ic_flags = IEEE80211_CHAN_A | nflags; 2313 } 2314 2315 /* Save maximum allowed TX power for this channel. */ 2316 sc->maxpwr[chan] = channels[i].maxpwr; 2317 2318 DPRINTF(sc, IWN_DEBUG_RESET, 2319 "add chan %d flags 0x%x maxpwr %d\n", chan, 2320 channels[i].flags, channels[i].maxpwr); 2321 2322 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 2323 /* add HT20, HT40 added separately */ 2324 c = &ic->ic_channels[ic->ic_nchans++]; 2325 c[0] = c[-1]; 2326 c->ic_flags |= IEEE80211_CHAN_HT20; 2327 } 2328 } 2329 2330 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2331 2332 } 2333 2334 static void 2335 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n) 2336 { 2337 struct ifnet *ifp = sc->sc_ifp; 2338 struct ieee80211com *ic = ifp->if_l2com; 2339 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 2340 const struct iwn_chan_band *band = &iwn_bands[n]; 2341 struct ieee80211_channel *c, *cent, *extc; 2342 uint8_t chan; 2343 int i, nflags; 2344 2345 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__); 2346 2347 if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) { 2348 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__); 2349 return; 2350 } 2351 2352 for (i = 0; i < band->nchan; i++) { 2353 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 2354 DPRINTF(sc, IWN_DEBUG_RESET, 2355 "skip chan %d flags 0x%x maxpwr %d\n", 2356 band->chan[i], channels[i].flags, 2357 channels[i].maxpwr); 2358 continue; 2359 } 2360 chan = band->chan[i]; 2361 nflags = iwn_eeprom_channel_flags(&channels[i]); 2362 2363 /* 2364 * Each entry defines an HT40 channel pair; find the 2365 * center channel, then the extension channel above. 2366 */ 2367 cent = ieee80211_find_channel_byieee(ic, chan, 2368 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A)); 2369 if (cent == NULL) { /* XXX shouldn't happen */ 2370 device_printf(sc->sc_dev, 2371 "%s: no entry for channel %d\n", __func__, chan); 2372 continue; 2373 } 2374 extc = ieee80211_find_channel(ic, cent->ic_freq+20, 2375 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A)); 2376 if (extc == NULL) { 2377 DPRINTF(sc, IWN_DEBUG_RESET, 2378 "%s: skip chan %d, extension channel not found\n", 2379 __func__, chan); 2380 continue; 2381 } 2382 2383 DPRINTF(sc, IWN_DEBUG_RESET, 2384 "add ht40 chan %d flags 0x%x maxpwr %d\n", 2385 chan, channels[i].flags, channels[i].maxpwr); 2386 2387 c = &ic->ic_channels[ic->ic_nchans++]; 2388 c[0] = cent[0]; 2389 c->ic_extieee = extc->ic_ieee; 2390 c->ic_flags &= ~IEEE80211_CHAN_HT; 2391 c->ic_flags |= IEEE80211_CHAN_HT40U | nflags; 2392 c = &ic->ic_channels[ic->ic_nchans++]; 2393 c[0] = extc[0]; 2394 c->ic_extieee = cent->ic_ieee; 2395 c->ic_flags &= ~IEEE80211_CHAN_HT; 2396 c->ic_flags |= IEEE80211_CHAN_HT40D | nflags; 2397 } 2398 2399 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2400 2401 } 2402 2403 static void 2404 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 2405 { 2406 struct ifnet *ifp = sc->sc_ifp; 2407 struct ieee80211com *ic = ifp->if_l2com; 2408 2409 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], 2410 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); 2411 2412 if (n < 5) 2413 iwn_read_eeprom_band(sc, n); 2414 else 2415 iwn_read_eeprom_ht40(sc, n); 2416 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 2417 } 2418 2419 static struct iwn_eeprom_chan * 2420 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) 2421 { 2422 int band, chan, i, j; 2423 2424 if (IEEE80211_IS_CHAN_HT40(c)) { 2425 band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5; 2426 if (IEEE80211_IS_CHAN_HT40D(c)) 2427 chan = c->ic_extieee; 2428 else 2429 chan = c->ic_ieee; 2430 for (i = 0; i < iwn_bands[band].nchan; i++) { 2431 if (iwn_bands[band].chan[i] == chan) 2432 return &sc->eeprom_channels[band][i]; 2433 } 2434 } else { 2435 for (j = 0; j < 5; j++) { 2436 for (i = 0; i < iwn_bands[j].nchan; i++) { 2437 if (iwn_bands[j].chan[i] == c->ic_ieee) 2438 return &sc->eeprom_channels[j][i]; 2439 } 2440 } 2441 } 2442 return NULL; 2443 } 2444 2445 /* 2446 * Enforce flags read from EEPROM. 2447 */ 2448 static int 2449 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 2450 int nchan, struct ieee80211_channel chans[]) 2451 { 2452 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2453 int i; 2454 2455 for (i = 0; i < nchan; i++) { 2456 struct ieee80211_channel *c = &chans[i]; 2457 struct iwn_eeprom_chan *channel; 2458 2459 channel = iwn_find_eeprom_channel(sc, c); 2460 if (channel == NULL) { 2461 if_printf(ic->ic_ifp, 2462 "%s: invalid channel %u freq %u/0x%x\n", 2463 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 2464 return EINVAL; 2465 } 2466 c->ic_flags |= iwn_eeprom_channel_flags(channel); 2467 } 2468 2469 return 0; 2470 } 2471 2472 static void 2473 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 2474 { 2475 struct iwn_eeprom_enhinfo enhinfo[35]; 2476 struct ifnet *ifp = sc->sc_ifp; 2477 struct ieee80211com *ic = ifp->if_l2com; 2478 struct ieee80211_channel *c; 2479 uint16_t val, base; 2480 int8_t maxpwr; 2481 uint8_t flags; 2482 int i, j; 2483 2484 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2485 2486 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 2487 base = le16toh(val); 2488 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 2489 enhinfo, sizeof enhinfo); 2490 2491 for (i = 0; i < nitems(enhinfo); i++) { 2492 flags = enhinfo[i].flags; 2493 if (!(flags & IWN_ENHINFO_VALID)) 2494 continue; /* Skip invalid entries. */ 2495 2496 maxpwr = 0; 2497 if (sc->txchainmask & IWN_ANT_A) 2498 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 2499 if (sc->txchainmask & IWN_ANT_B) 2500 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 2501 if (sc->txchainmask & IWN_ANT_C) 2502 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 2503 if (sc->ntxchains == 2) 2504 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 2505 else if (sc->ntxchains == 3) 2506 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 2507 2508 for (j = 0; j < ic->ic_nchans; j++) { 2509 c = &ic->ic_channels[j]; 2510 if ((flags & IWN_ENHINFO_5GHZ)) { 2511 if (!IEEE80211_IS_CHAN_A(c)) 2512 continue; 2513 } else if ((flags & IWN_ENHINFO_OFDM)) { 2514 if (!IEEE80211_IS_CHAN_G(c)) 2515 continue; 2516 } else if (!IEEE80211_IS_CHAN_B(c)) 2517 continue; 2518 if ((flags & IWN_ENHINFO_HT40)) { 2519 if (!IEEE80211_IS_CHAN_HT40(c)) 2520 continue; 2521 } else { 2522 if (IEEE80211_IS_CHAN_HT40(c)) 2523 continue; 2524 } 2525 if (enhinfo[i].chan != 0 && 2526 enhinfo[i].chan != c->ic_ieee) 2527 continue; 2528 2529 DPRINTF(sc, IWN_DEBUG_RESET, 2530 "channel %d(%x), maxpwr %d\n", c->ic_ieee, 2531 c->ic_flags, maxpwr / 2); 2532 c->ic_maxregpower = maxpwr / 2; 2533 c->ic_maxpower = maxpwr; 2534 } 2535 } 2536 2537 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2538 2539 } 2540 2541 static struct ieee80211_node * 2542 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2543 { 2544 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 2545 } 2546 2547 static __inline int 2548 rate2plcp(int rate) 2549 { 2550 switch (rate & 0xff) { 2551 case 12: return 0xd; 2552 case 18: return 0xf; 2553 case 24: return 0x5; 2554 case 36: return 0x7; 2555 case 48: return 0x9; 2556 case 72: return 0xb; 2557 case 96: return 0x1; 2558 case 108: return 0x3; 2559 case 2: return 10; 2560 case 4: return 20; 2561 case 11: return 55; 2562 case 22: return 110; 2563 } 2564 return 0; 2565 } 2566 2567 /* 2568 * Calculate the required PLCP value from the given rate, 2569 * to the given node. 2570 * 2571 * This will take the node configuration (eg 11n, rate table 2572 * setup, etc) into consideration. 2573 */ 2574 static uint32_t 2575 iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni, 2576 uint8_t rate) 2577 { 2578 #define RV(v) ((v) & IEEE80211_RATE_VAL) 2579 struct ieee80211com *ic = ni->ni_ic; 2580 uint8_t txant1, txant2; 2581 uint32_t plcp = 0; 2582 int ridx; 2583 2584 /* Use the first valid TX antenna. */ 2585 txant1 = IWN_LSB(sc->txchainmask); 2586 txant2 = IWN_LSB(sc->txchainmask & ~txant1); 2587 2588 /* 2589 * If it's an MCS rate, let's set the plcp correctly 2590 * and set the relevant flags based on the node config. 2591 */ 2592 if (rate & IEEE80211_RATE_MCS) { 2593 /* 2594 * Set the initial PLCP value to be between 0->31 for 2595 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!" 2596 * flag. 2597 */ 2598 plcp = RV(rate) | IWN_RFLAG_MCS; 2599 2600 /* 2601 * XXX the following should only occur if both 2602 * the local configuration _and_ the remote node 2603 * advertise these capabilities. Thus this code 2604 * may need fixing! 2605 */ 2606 2607 /* 2608 * Set the channel width and guard interval. 2609 */ 2610 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 2611 plcp |= IWN_RFLAG_HT40; 2612 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) 2613 plcp |= IWN_RFLAG_SGI; 2614 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { 2615 plcp |= IWN_RFLAG_SGI; 2616 } 2617 2618 /* 2619 * If it's a two stream rate, enable TX on both 2620 * antennas. 2621 * 2622 * XXX three stream rates? 2623 */ 2624 if (rate > 0x87) 2625 plcp |= IWN_RFLAG_ANT(txant1 | txant2); 2626 else 2627 plcp |= IWN_RFLAG_ANT(txant1); 2628 } else { 2629 /* 2630 * Set the initial PLCP - fine for both 2631 * OFDM and CCK rates. 2632 */ 2633 plcp = rate2plcp(rate); 2634 2635 /* Set CCK flag if it's CCK */ 2636 2637 /* XXX It would be nice to have a method 2638 * to map the ridx -> phy table entry 2639 * so we could just query that, rather than 2640 * this hack to check against IWN_RIDX_OFDM6. 2641 */ 2642 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, 2643 rate & IEEE80211_RATE_VAL); 2644 if (ridx < IWN_RIDX_OFDM6 && 2645 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 2646 plcp |= IWN_RFLAG_CCK; 2647 2648 /* Set antenna configuration */ 2649 plcp |= IWN_RFLAG_ANT(txant1); 2650 } 2651 2652 DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n", 2653 __func__, 2654 rate, 2655 plcp); 2656 2657 return (htole32(plcp)); 2658 #undef RV 2659 } 2660 2661 static void 2662 iwn_newassoc(struct ieee80211_node *ni, int isnew) 2663 { 2664 /* Doesn't do anything at the moment */ 2665 } 2666 2667 static int 2668 iwn_media_change(struct ifnet *ifp) 2669 { 2670 int error; 2671 2672 error = ieee80211_media_change(ifp); 2673 /* NB: only the fixed rate can change and that doesn't need a reset */ 2674 return (error == ENETRESET ? 0 : error); 2675 } 2676 2677 static int 2678 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 2679 { 2680 struct iwn_vap *ivp = IWN_VAP(vap); 2681 struct ieee80211com *ic = vap->iv_ic; 2682 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2683 int error = 0; 2684 2685 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2686 2687 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 2688 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); 2689 2690 IEEE80211_UNLOCK(ic); 2691 IWN_LOCK(sc); 2692 callout_stop(&sc->calib_to); 2693 2694 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 2695 2696 switch (nstate) { 2697 case IEEE80211_S_ASSOC: 2698 if (vap->iv_state != IEEE80211_S_RUN) 2699 break; 2700 /* FALLTHROUGH */ 2701 case IEEE80211_S_AUTH: 2702 if (vap->iv_state == IEEE80211_S_AUTH) 2703 break; 2704 2705 /* 2706 * !AUTH -> AUTH transition requires state reset to handle 2707 * reassociations correctly. 2708 */ 2709 sc->rxon->associd = 0; 2710 sc->rxon->filter &= ~htole32(IWN_FILTER_BSS); 2711 sc->calib.state = IWN_CALIB_STATE_INIT; 2712 2713 if ((error = iwn_auth(sc, vap)) != 0) { 2714 device_printf(sc->sc_dev, 2715 "%s: could not move to auth state\n", __func__); 2716 } 2717 break; 2718 2719 case IEEE80211_S_RUN: 2720 /* 2721 * RUN -> RUN transition; Just restart the timers. 2722 */ 2723 if (vap->iv_state == IEEE80211_S_RUN) { 2724 sc->calib_cnt = 0; 2725 break; 2726 } 2727 2728 /* 2729 * !RUN -> RUN requires setting the association id 2730 * which is done with a firmware cmd. We also defer 2731 * starting the timers until that work is done. 2732 */ 2733 if ((error = iwn_run(sc, vap)) != 0) { 2734 device_printf(sc->sc_dev, 2735 "%s: could not move to run state\n", __func__); 2736 } 2737 break; 2738 2739 case IEEE80211_S_INIT: 2740 sc->calib.state = IWN_CALIB_STATE_INIT; 2741 break; 2742 2743 default: 2744 break; 2745 } 2746 IWN_UNLOCK(sc); 2747 IEEE80211_LOCK(ic); 2748 if (error != 0){ 2749 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 2750 return error; 2751 } 2752 2753 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2754 2755 return ivp->iv_newstate(vap, nstate, arg); 2756 } 2757 2758 static void 2759 iwn_calib_timeout(void *arg) 2760 { 2761 struct iwn_softc *sc = arg; 2762 2763 IWN_LOCK_ASSERT(sc); 2764 2765 /* Force automatic TX power calibration every 60 secs. */ 2766 if (++sc->calib_cnt >= 120) { 2767 uint32_t flags = 0; 2768 2769 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2770 "sending request for statistics"); 2771 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2772 sizeof flags, 1); 2773 sc->calib_cnt = 0; 2774 } 2775 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 2776 sc); 2777 } 2778 2779 /* 2780 * Process an RX_PHY firmware notification. This is usually immediately 2781 * followed by an MPDU_RX_DONE notification. 2782 */ 2783 static void 2784 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2785 struct iwn_rx_data *data) 2786 { 2787 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2788 2789 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2790 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2791 2792 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2793 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2794 sc->last_rx_valid = 1; 2795 } 2796 2797 /* 2798 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2799 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 2800 */ 2801 static void 2802 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2803 struct iwn_rx_data *data) 2804 { 2805 struct iwn_ops *ops = &sc->ops; 2806 struct ifnet *ifp = sc->sc_ifp; 2807 struct ieee80211com *ic = ifp->if_l2com; 2808 struct iwn_rx_ring *ring = &sc->rxq; 2809 struct ieee80211_frame *wh; 2810 struct ieee80211_node *ni; 2811 struct mbuf *m, *m1; 2812 struct iwn_rx_stat *stat; 2813 caddr_t head; 2814 bus_addr_t paddr; 2815 uint32_t flags; 2816 int error, len, rssi, nf; 2817 2818 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2819 2820 if (desc->type == IWN_MPDU_RX_DONE) { 2821 /* Check for prior RX_PHY notification. */ 2822 if (!sc->last_rx_valid) { 2823 DPRINTF(sc, IWN_DEBUG_ANY, 2824 "%s: missing RX_PHY\n", __func__); 2825 return; 2826 } 2827 stat = &sc->last_rx_stat; 2828 } else 2829 stat = (struct iwn_rx_stat *)(desc + 1); 2830 2831 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2832 2833 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2834 device_printf(sc->sc_dev, 2835 "%s: invalid RX statistic header, len %d\n", __func__, 2836 stat->cfg_phy_len); 2837 return; 2838 } 2839 if (desc->type == IWN_MPDU_RX_DONE) { 2840 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2841 head = (caddr_t)(mpdu + 1); 2842 len = le16toh(mpdu->len); 2843 } else { 2844 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2845 len = le16toh(stat->len); 2846 } 2847 2848 flags = le32toh(*(uint32_t *)(head + len)); 2849 2850 /* Discard frames with a bad FCS early. */ 2851 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2852 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", 2853 __func__, flags); 2854 ifp->if_ierrors++; 2855 return; 2856 } 2857 /* Discard frames that are too short. */ 2858 if (len < sizeof (*wh)) { 2859 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2860 __func__, len); 2861 ifp->if_ierrors++; 2862 return; 2863 } 2864 2865 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 2866 if (m1 == NULL) { 2867 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2868 __func__); 2869 ifp->if_ierrors++; 2870 return; 2871 } 2872 bus_dmamap_unload(ring->data_dmat, data->map); 2873 2874 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 2875 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2876 if (error != 0 && error != EFBIG) { 2877 device_printf(sc->sc_dev, 2878 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2879 m_freem(m1); 2880 2881 /* Try to reload the old mbuf. */ 2882 error = bus_dmamap_load(ring->data_dmat, data->map, 2883 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 2884 &paddr, BUS_DMA_NOWAIT); 2885 if (error != 0 && error != EFBIG) { 2886 panic("%s: could not load old RX mbuf", __func__); 2887 } 2888 /* Physical address may have changed. */ 2889 ring->desc[ring->cur] = htole32(paddr >> 8); 2890 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2891 BUS_DMASYNC_PREWRITE); 2892 ifp->if_ierrors++; 2893 return; 2894 } 2895 2896 m = data->m; 2897 data->m = m1; 2898 /* Update RX descriptor. */ 2899 ring->desc[ring->cur] = htole32(paddr >> 8); 2900 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2901 BUS_DMASYNC_PREWRITE); 2902 2903 /* Finalize mbuf. */ 2904 m->m_pkthdr.rcvif = ifp; 2905 m->m_data = head; 2906 m->m_pkthdr.len = m->m_len = len; 2907 2908 /* Grab a reference to the source node. */ 2909 wh = mtod(m, struct ieee80211_frame *); 2910 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2911 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2912 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95; 2913 2914 rssi = ops->get_rssi(sc, stat); 2915 2916 if (ieee80211_radiotap_active(ic)) { 2917 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2918 2919 tap->wr_flags = 0; 2920 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2921 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2922 tap->wr_dbm_antsignal = (int8_t)rssi; 2923 tap->wr_dbm_antnoise = (int8_t)nf; 2924 tap->wr_tsft = stat->tstamp; 2925 switch (stat->rate) { 2926 /* CCK rates. */ 2927 case 10: tap->wr_rate = 2; break; 2928 case 20: tap->wr_rate = 4; break; 2929 case 55: tap->wr_rate = 11; break; 2930 case 110: tap->wr_rate = 22; break; 2931 /* OFDM rates. */ 2932 case 0xd: tap->wr_rate = 12; break; 2933 case 0xf: tap->wr_rate = 18; break; 2934 case 0x5: tap->wr_rate = 24; break; 2935 case 0x7: tap->wr_rate = 36; break; 2936 case 0x9: tap->wr_rate = 48; break; 2937 case 0xb: tap->wr_rate = 72; break; 2938 case 0x1: tap->wr_rate = 96; break; 2939 case 0x3: tap->wr_rate = 108; break; 2940 /* Unknown rate: should not happen. */ 2941 default: tap->wr_rate = 0; 2942 } 2943 } 2944 2945 IWN_UNLOCK(sc); 2946 2947 /* Send the frame to the 802.11 layer. */ 2948 if (ni != NULL) { 2949 if (ni->ni_flags & IEEE80211_NODE_HT) 2950 m->m_flags |= M_AMPDU; 2951 (void)ieee80211_input(ni, m, rssi - nf, nf); 2952 /* Node is no longer needed. */ 2953 ieee80211_free_node(ni); 2954 } else 2955 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 2956 2957 IWN_LOCK(sc); 2958 2959 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2960 2961 } 2962 2963 /* Process an incoming Compressed BlockAck. */ 2964 static void 2965 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2966 struct iwn_rx_data *data) 2967 { 2968 struct iwn_ops *ops = &sc->ops; 2969 struct ifnet *ifp = sc->sc_ifp; 2970 struct iwn_node *wn; 2971 struct ieee80211_node *ni; 2972 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2973 struct iwn_tx_ring *txq; 2974 struct iwn_tx_data *txdata; 2975 struct ieee80211_tx_ampdu *tap; 2976 struct mbuf *m; 2977 uint64_t bitmap; 2978 uint16_t ssn; 2979 uint8_t tid; 2980 int ackfailcnt = 0, i, lastidx, qid, *res, shift; 2981 2982 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2983 2984 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2985 2986 qid = le16toh(ba->qid); 2987 txq = &sc->txq[ba->qid]; 2988 tap = sc->qid2tap[ba->qid]; 2989 tid = tap->txa_tid; 2990 wn = (void *)tap->txa_ni; 2991 2992 res = NULL; 2993 ssn = 0; 2994 if (!IEEE80211_AMPDU_RUNNING(tap)) { 2995 res = tap->txa_private; 2996 ssn = tap->txa_start & 0xfff; 2997 } 2998 2999 for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) { 3000 txdata = &txq->data[txq->read]; 3001 3002 /* Unmap and free mbuf. */ 3003 bus_dmamap_sync(txq->data_dmat, txdata->map, 3004 BUS_DMASYNC_POSTWRITE); 3005 bus_dmamap_unload(txq->data_dmat, txdata->map); 3006 m = txdata->m, txdata->m = NULL; 3007 ni = txdata->ni, txdata->ni = NULL; 3008 3009 KASSERT(ni != NULL, ("no node")); 3010 KASSERT(m != NULL, ("no mbuf")); 3011 3012 ieee80211_tx_complete(ni, m, 1); 3013 3014 txq->queued--; 3015 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; 3016 } 3017 3018 if (txq->queued == 0 && res != NULL) { 3019 iwn_nic_lock(sc); 3020 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3021 iwn_nic_unlock(sc); 3022 sc->qid2tap[qid] = NULL; 3023 free(res, M_DEVBUF); 3024 return; 3025 } 3026 3027 if (wn->agg[tid].bitmap == 0) 3028 return; 3029 3030 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 3031 if (shift < 0) 3032 shift += 0x100; 3033 3034 if (wn->agg[tid].nframes > (64 - shift)) 3035 return; 3036 3037 ni = tap->txa_ni; 3038 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; 3039 for (i = 0; bitmap; i++) { 3040 if ((bitmap & 1) == 0) { 3041 ifp->if_oerrors++; 3042 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3043 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3044 } else { 3045 ifp->if_opackets++; 3046 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3047 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3048 } 3049 bitmap >>= 1; 3050 } 3051 3052 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3053 3054 } 3055 3056 /* 3057 * Process a CALIBRATION_RESULT notification sent by the initialization 3058 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 3059 */ 3060 static void 3061 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3062 struct iwn_rx_data *data) 3063 { 3064 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 3065 int len, idx = -1; 3066 3067 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3068 3069 /* Runtime firmware should not send such a notification. */ 3070 if (sc->sc_flags & IWN_FLAG_CALIB_DONE){ 3071 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n", 3072 __func__); 3073 return; 3074 } 3075 len = (le32toh(desc->len) & 0x3fff) - 4; 3076 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3077 3078 switch (calib->code) { 3079 case IWN5000_PHY_CALIB_DC: 3080 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC) 3081 idx = 0; 3082 break; 3083 case IWN5000_PHY_CALIB_LO: 3084 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO) 3085 idx = 1; 3086 break; 3087 case IWN5000_PHY_CALIB_TX_IQ: 3088 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ) 3089 idx = 2; 3090 break; 3091 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 3092 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC) 3093 idx = 3; 3094 break; 3095 case IWN5000_PHY_CALIB_BASE_BAND: 3096 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND) 3097 idx = 4; 3098 break; 3099 } 3100 if (idx == -1) /* Ignore other results. */ 3101 return; 3102 3103 /* Save calibration result. */ 3104 if (sc->calibcmd[idx].buf != NULL) 3105 free(sc->calibcmd[idx].buf, M_DEVBUF); 3106 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 3107 if (sc->calibcmd[idx].buf == NULL) { 3108 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3109 "not enough memory for calibration result %d\n", 3110 calib->code); 3111 return; 3112 } 3113 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3114 "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len); 3115 sc->calibcmd[idx].len = len; 3116 memcpy(sc->calibcmd[idx].buf, calib, len); 3117 } 3118 3119 /* 3120 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 3121 * The latter is sent by the firmware after each received beacon. 3122 */ 3123 static void 3124 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3125 struct iwn_rx_data *data) 3126 { 3127 struct iwn_ops *ops = &sc->ops; 3128 struct ifnet *ifp = sc->sc_ifp; 3129 struct ieee80211com *ic = ifp->if_l2com; 3130 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3131 struct iwn_calib_state *calib = &sc->calib; 3132 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 3133 int temp; 3134 3135 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3136 3137 /* Ignore statistics received during a scan. */ 3138 if (vap->iv_state != IEEE80211_S_RUN || 3139 (ic->ic_flags & IEEE80211_F_SCAN)){ 3140 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n", 3141 __func__); 3142 return; 3143 } 3144 3145 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3146 3147 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n", 3148 __func__, desc->type); 3149 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 3150 3151 /* Test if temperature has changed. */ 3152 if (stats->general.temp != sc->rawtemp) { 3153 /* Convert "raw" temperature to degC. */ 3154 sc->rawtemp = stats->general.temp; 3155 temp = ops->get_temperature(sc); 3156 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 3157 __func__, temp); 3158 3159 /* Update TX power if need be (4965AGN only). */ 3160 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3161 iwn4965_power_calibration(sc, temp); 3162 } 3163 3164 if (desc->type != IWN_BEACON_STATISTICS) 3165 return; /* Reply to a statistics request. */ 3166 3167 sc->noise = iwn_get_noise(&stats->rx.general); 3168 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 3169 3170 /* Test that RSSI and noise are present in stats report. */ 3171 if (le32toh(stats->rx.general.flags) != 1) { 3172 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 3173 "received statistics without RSSI"); 3174 return; 3175 } 3176 3177 if (calib->state == IWN_CALIB_STATE_ASSOC) 3178 iwn_collect_noise(sc, &stats->rx.general); 3179 else if (calib->state == IWN_CALIB_STATE_RUN) { 3180 iwn_tune_sensitivity(sc, &stats->rx); 3181 /* 3182 * XXX TODO: Only run the RX recovery if we're associated! 3183 */ 3184 iwn_check_rx_recovery(sc, stats); 3185 iwn_save_stats_counters(sc, stats); 3186 } 3187 3188 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3189 } 3190 3191 /* 3192 * Save the relevant statistic counters for the next calibration 3193 * pass. 3194 */ 3195 static void 3196 iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs) 3197 { 3198 struct iwn_calib_state *calib = &sc->calib; 3199 3200 /* Save counters values for next call. */ 3201 calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp); 3202 calib->fa_cck = le32toh(rs->rx.cck.fa); 3203 calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp); 3204 calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp); 3205 calib->fa_ofdm = le32toh(rs->rx.ofdm.fa); 3206 3207 /* Last time we received these tick values */ 3208 sc->last_calib_ticks = ticks; 3209 } 3210 3211 /* 3212 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 3213 * and 5000 adapters have different incompatible TX status formats. 3214 */ 3215 static void 3216 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3217 struct iwn_rx_data *data) 3218 { 3219 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 3220 struct iwn_tx_ring *ring; 3221 int qid; 3222 3223 qid = desc->qid & 0xf; 3224 ring = &sc->txq[qid]; 3225 3226 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3227 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3228 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3229 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3230 le32toh(stat->status)); 3231 3232 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3233 if (qid >= sc->firstaggqueue) { 3234 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3235 &stat->status); 3236 } else { 3237 iwn_tx_done(sc, desc, stat->ackfailcnt, 3238 le32toh(stat->status) & 0xff); 3239 } 3240 } 3241 3242 static void 3243 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3244 struct iwn_rx_data *data) 3245 { 3246 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 3247 struct iwn_tx_ring *ring; 3248 int qid; 3249 3250 qid = desc->qid & 0xf; 3251 ring = &sc->txq[qid]; 3252 3253 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3254 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 3255 __func__, desc->qid, desc->idx, stat->ackfailcnt, 3256 stat->btkillcnt, stat->rate, le16toh(stat->duration), 3257 le32toh(stat->status)); 3258 3259 #ifdef notyet 3260 /* Reset TX scheduler slot. */ 3261 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 3262 #endif 3263 3264 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3265 if (qid >= sc->firstaggqueue) { 3266 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 3267 &stat->status); 3268 } else { 3269 iwn_tx_done(sc, desc, stat->ackfailcnt, 3270 le16toh(stat->status) & 0xff); 3271 } 3272 } 3273 3274 /* 3275 * Adapter-independent backend for TX_DONE firmware notifications. 3276 */ 3277 static void 3278 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 3279 uint8_t status) 3280 { 3281 struct ifnet *ifp = sc->sc_ifp; 3282 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 3283 struct iwn_tx_data *data = &ring->data[desc->idx]; 3284 struct mbuf *m; 3285 struct ieee80211_node *ni; 3286 struct ieee80211vap *vap; 3287 3288 KASSERT(data->ni != NULL, ("no node")); 3289 3290 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3291 3292 /* Unmap and free mbuf. */ 3293 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 3294 bus_dmamap_unload(ring->data_dmat, data->map); 3295 m = data->m, data->m = NULL; 3296 ni = data->ni, data->ni = NULL; 3297 vap = ni->ni_vap; 3298 3299 /* 3300 * Update rate control statistics for the node. 3301 */ 3302 if (status & IWN_TX_FAIL) { 3303 ifp->if_oerrors++; 3304 ieee80211_ratectl_tx_complete(vap, ni, 3305 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3306 } else { 3307 ifp->if_opackets++; 3308 ieee80211_ratectl_tx_complete(vap, ni, 3309 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3310 } 3311 3312 /* 3313 * Channels marked for "radar" require traffic to be received 3314 * to unlock before we can transmit. Until traffic is seen 3315 * any attempt to transmit is returned immediately with status 3316 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 3317 * happen on first authenticate after scanning. To workaround 3318 * this we ignore a failure of this sort in AUTH state so the 3319 * 802.11 layer will fall back to using a timeout to wait for 3320 * the AUTH reply. This allows the firmware time to see 3321 * traffic so a subsequent retry of AUTH succeeds. It's 3322 * unclear why the firmware does not maintain state for 3323 * channels recently visited as this would allow immediate 3324 * use of the channel after a scan (where we see traffic). 3325 */ 3326 if (status == IWN_TX_FAIL_TX_LOCKED && 3327 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 3328 ieee80211_tx_complete(ni, m, 0); 3329 else 3330 ieee80211_tx_complete(ni, m, 3331 (status & IWN_TX_FAIL) != 0); 3332 3333 sc->sc_tx_timer = 0; 3334 if (--ring->queued < IWN_TX_RING_LOMARK) { 3335 sc->qfullmsk &= ~(1 << ring->qid); 3336 if (sc->qfullmsk == 0 && 3337 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3338 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3339 iwn_start_locked(ifp); 3340 } 3341 } 3342 3343 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3344 3345 } 3346 3347 /* 3348 * Process a "command done" firmware notification. This is where we wakeup 3349 * processes waiting for a synchronous command completion. 3350 */ 3351 static void 3352 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3353 { 3354 struct iwn_tx_ring *ring; 3355 struct iwn_tx_data *data; 3356 int cmd_queue_num; 3357 3358 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 3359 cmd_queue_num = IWN_PAN_CMD_QUEUE; 3360 else 3361 cmd_queue_num = IWN_CMD_QUEUE_NUM; 3362 3363 if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num) 3364 return; /* Not a command ack. */ 3365 3366 ring = &sc->txq[cmd_queue_num]; 3367 data = &ring->data[desc->idx]; 3368 3369 /* If the command was mapped in an mbuf, free it. */ 3370 if (data->m != NULL) { 3371 bus_dmamap_sync(ring->data_dmat, data->map, 3372 BUS_DMASYNC_POSTWRITE); 3373 bus_dmamap_unload(ring->data_dmat, data->map); 3374 m_freem(data->m); 3375 data->m = NULL; 3376 } 3377 wakeup(&ring->desc[desc->idx]); 3378 } 3379 3380 static void 3381 iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes, 3382 void *stat) 3383 { 3384 struct iwn_ops *ops = &sc->ops; 3385 struct ifnet *ifp = sc->sc_ifp; 3386 struct iwn_tx_ring *ring = &sc->txq[qid]; 3387 struct iwn_tx_data *data; 3388 struct mbuf *m; 3389 struct iwn_node *wn; 3390 struct ieee80211_node *ni; 3391 struct ieee80211_tx_ampdu *tap; 3392 uint64_t bitmap; 3393 uint32_t *status = stat; 3394 uint16_t *aggstatus = stat; 3395 uint16_t ssn; 3396 uint8_t tid; 3397 int bit, i, lastidx, *res, seqno, shift, start; 3398 3399 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3400 3401 if (nframes == 1) { 3402 if ((*status & 0xff) != 1 && (*status & 0xff) != 2) { 3403 #ifdef NOT_YET 3404 printf("ieee80211_send_bar()\n"); 3405 #endif 3406 /* 3407 * If we completely fail a transmit, make sure a 3408 * notification is pushed up to the rate control 3409 * layer. 3410 */ 3411 tap = sc->qid2tap[qid]; 3412 tid = tap->txa_tid; 3413 wn = (void *)tap->txa_ni; 3414 ni = tap->txa_ni; 3415 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3416 IEEE80211_RATECTL_TX_FAILURE, &nframes, NULL); 3417 } 3418 } 3419 3420 bitmap = 0; 3421 start = idx; 3422 for (i = 0; i < nframes; i++) { 3423 if (le16toh(aggstatus[i * 2]) & 0xc) 3424 continue; 3425 3426 idx = le16toh(aggstatus[2*i + 1]) & 0xff; 3427 bit = idx - start; 3428 shift = 0; 3429 if (bit >= 64) { 3430 shift = 0x100 - idx + start; 3431 bit = 0; 3432 start = idx; 3433 } else if (bit <= -64) 3434 bit = 0x100 - start + idx; 3435 else if (bit < 0) { 3436 shift = start - idx; 3437 start = idx; 3438 bit = 0; 3439 } 3440 bitmap = bitmap << shift; 3441 bitmap |= 1ULL << bit; 3442 } 3443 tap = sc->qid2tap[qid]; 3444 tid = tap->txa_tid; 3445 wn = (void *)tap->txa_ni; 3446 wn->agg[tid].bitmap = bitmap; 3447 wn->agg[tid].startidx = start; 3448 wn->agg[tid].nframes = nframes; 3449 3450 res = NULL; 3451 ssn = 0; 3452 if (!IEEE80211_AMPDU_RUNNING(tap)) { 3453 res = tap->txa_private; 3454 ssn = tap->txa_start & 0xfff; 3455 } 3456 3457 seqno = le32toh(*(status + nframes)) & 0xfff; 3458 for (lastidx = (seqno & 0xff); ring->read != lastidx;) { 3459 data = &ring->data[ring->read]; 3460 3461 /* Unmap and free mbuf. */ 3462 bus_dmamap_sync(ring->data_dmat, data->map, 3463 BUS_DMASYNC_POSTWRITE); 3464 bus_dmamap_unload(ring->data_dmat, data->map); 3465 m = data->m, data->m = NULL; 3466 ni = data->ni, data->ni = NULL; 3467 3468 KASSERT(ni != NULL, ("no node")); 3469 KASSERT(m != NULL, ("no mbuf")); 3470 3471 ieee80211_tx_complete(ni, m, 1); 3472 3473 ring->queued--; 3474 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT; 3475 } 3476 3477 if (ring->queued == 0 && res != NULL) { 3478 iwn_nic_lock(sc); 3479 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3480 iwn_nic_unlock(sc); 3481 sc->qid2tap[qid] = NULL; 3482 free(res, M_DEVBUF); 3483 return; 3484 } 3485 3486 sc->sc_tx_timer = 0; 3487 if (ring->queued < IWN_TX_RING_LOMARK) { 3488 sc->qfullmsk &= ~(1 << ring->qid); 3489 if (sc->qfullmsk == 0 && 3490 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3491 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3492 iwn_start_locked(ifp); 3493 } 3494 } 3495 3496 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3497 3498 } 3499 3500 /* 3501 * Process an INT_FH_RX or INT_SW_RX interrupt. 3502 */ 3503 static void 3504 iwn_notif_intr(struct iwn_softc *sc) 3505 { 3506 struct iwn_ops *ops = &sc->ops; 3507 struct ifnet *ifp = sc->sc_ifp; 3508 struct ieee80211com *ic = ifp->if_l2com; 3509 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3510 uint16_t hw; 3511 3512 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 3513 BUS_DMASYNC_POSTREAD); 3514 3515 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 3516 while (sc->rxq.cur != hw) { 3517 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 3518 struct iwn_rx_desc *desc; 3519 3520 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3521 BUS_DMASYNC_POSTREAD); 3522 desc = mtod(data->m, struct iwn_rx_desc *); 3523 3524 DPRINTF(sc, IWN_DEBUG_RECV, 3525 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 3526 __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags, 3527 desc->type, iwn_intr_str(desc->type), 3528 le16toh(desc->len)); 3529 3530 if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */ 3531 iwn_cmd_done(sc, desc); 3532 3533 switch (desc->type) { 3534 case IWN_RX_PHY: 3535 iwn_rx_phy(sc, desc, data); 3536 break; 3537 3538 case IWN_RX_DONE: /* 4965AGN only. */ 3539 case IWN_MPDU_RX_DONE: 3540 /* An 802.11 frame has been received. */ 3541 iwn_rx_done(sc, desc, data); 3542 break; 3543 3544 case IWN_RX_COMPRESSED_BA: 3545 /* A Compressed BlockAck has been received. */ 3546 iwn_rx_compressed_ba(sc, desc, data); 3547 break; 3548 3549 case IWN_TX_DONE: 3550 /* An 802.11 frame has been transmitted. */ 3551 ops->tx_done(sc, desc, data); 3552 break; 3553 3554 case IWN_RX_STATISTICS: 3555 case IWN_BEACON_STATISTICS: 3556 iwn_rx_statistics(sc, desc, data); 3557 break; 3558 3559 case IWN_BEACON_MISSED: 3560 { 3561 struct iwn_beacon_missed *miss = 3562 (struct iwn_beacon_missed *)(desc + 1); 3563 int misses; 3564 3565 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3566 BUS_DMASYNC_POSTREAD); 3567 misses = le32toh(miss->consecutive); 3568 3569 DPRINTF(sc, IWN_DEBUG_STATE, 3570 "%s: beacons missed %d/%d\n", __func__, 3571 misses, le32toh(miss->total)); 3572 /* 3573 * If more than 5 consecutive beacons are missed, 3574 * reinitialize the sensitivity state machine. 3575 */ 3576 if (vap->iv_state == IEEE80211_S_RUN && 3577 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 3578 if (misses > 5) 3579 (void)iwn_init_sensitivity(sc); 3580 if (misses >= vap->iv_bmissthreshold) { 3581 IWN_UNLOCK(sc); 3582 ieee80211_beacon_miss(ic); 3583 IWN_LOCK(sc); 3584 } 3585 } 3586 break; 3587 } 3588 case IWN_UC_READY: 3589 { 3590 struct iwn_ucode_info *uc = 3591 (struct iwn_ucode_info *)(desc + 1); 3592 3593 /* The microcontroller is ready. */ 3594 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3595 BUS_DMASYNC_POSTREAD); 3596 DPRINTF(sc, IWN_DEBUG_RESET, 3597 "microcode alive notification version=%d.%d " 3598 "subtype=%x alive=%x\n", uc->major, uc->minor, 3599 uc->subtype, le32toh(uc->valid)); 3600 3601 if (le32toh(uc->valid) != 1) { 3602 device_printf(sc->sc_dev, 3603 "microcontroller initialization failed"); 3604 break; 3605 } 3606 if (uc->subtype == IWN_UCODE_INIT) { 3607 /* Save microcontroller report. */ 3608 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 3609 } 3610 /* Save the address of the error log in SRAM. */ 3611 sc->errptr = le32toh(uc->errptr); 3612 break; 3613 } 3614 case IWN_STATE_CHANGED: 3615 { 3616 /* 3617 * State change allows hardware switch change to be 3618 * noted. However, we handle this in iwn_intr as we 3619 * get both the enable/disble intr. 3620 */ 3621 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3622 BUS_DMASYNC_POSTREAD); 3623 #ifdef IWN_DEBUG 3624 uint32_t *status = (uint32_t *)(desc + 1); 3625 DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE, 3626 "state changed to %x\n", 3627 le32toh(*status)); 3628 #endif 3629 break; 3630 } 3631 case IWN_START_SCAN: 3632 { 3633 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3634 BUS_DMASYNC_POSTREAD); 3635 #ifdef IWN_DEBUG 3636 struct iwn_start_scan *scan = 3637 (struct iwn_start_scan *)(desc + 1); 3638 DPRINTF(sc, IWN_DEBUG_ANY, 3639 "%s: scanning channel %d status %x\n", 3640 __func__, scan->chan, le32toh(scan->status)); 3641 #endif 3642 break; 3643 } 3644 case IWN_STOP_SCAN: 3645 { 3646 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3647 BUS_DMASYNC_POSTREAD); 3648 #ifdef IWN_DEBUG 3649 struct iwn_stop_scan *scan = 3650 (struct iwn_stop_scan *)(desc + 1); 3651 DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN, 3652 "scan finished nchan=%d status=%d chan=%d\n", 3653 scan->nchan, scan->status, scan->chan); 3654 #endif 3655 sc->sc_is_scanning = 0; 3656 IWN_UNLOCK(sc); 3657 ieee80211_scan_next(vap); 3658 IWN_LOCK(sc); 3659 break; 3660 } 3661 case IWN5000_CALIBRATION_RESULT: 3662 iwn5000_rx_calib_results(sc, desc, data); 3663 break; 3664 3665 case IWN5000_CALIBRATION_DONE: 3666 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 3667 wakeup(sc); 3668 break; 3669 } 3670 3671 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 3672 } 3673 3674 /* Tell the firmware what we have processed. */ 3675 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 3676 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 3677 } 3678 3679 /* 3680 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 3681 * from power-down sleep mode. 3682 */ 3683 static void 3684 iwn_wakeup_intr(struct iwn_softc *sc) 3685 { 3686 int qid; 3687 3688 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 3689 __func__); 3690 3691 /* Wakeup RX and TX rings. */ 3692 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 3693 for (qid = 0; qid < sc->ntxqs; qid++) { 3694 struct iwn_tx_ring *ring = &sc->txq[qid]; 3695 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 3696 } 3697 } 3698 3699 static void 3700 iwn_rftoggle_intr(struct iwn_softc *sc) 3701 { 3702 struct ifnet *ifp = sc->sc_ifp; 3703 struct ieee80211com *ic = ifp->if_l2com; 3704 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 3705 3706 IWN_LOCK_ASSERT(sc); 3707 3708 device_printf(sc->sc_dev, "RF switch: radio %s\n", 3709 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 3710 if (tmp & IWN_GP_CNTRL_RFKILL) 3711 ieee80211_runtask(ic, &sc->sc_radioon_task); 3712 else 3713 ieee80211_runtask(ic, &sc->sc_radiooff_task); 3714 } 3715 3716 /* 3717 * Dump the error log of the firmware when a firmware panic occurs. Although 3718 * we can't debug the firmware because it is neither open source nor free, it 3719 * can help us to identify certain classes of problems. 3720 */ 3721 static void 3722 iwn_fatal_intr(struct iwn_softc *sc) 3723 { 3724 struct iwn_fw_dump dump; 3725 int i; 3726 3727 IWN_LOCK_ASSERT(sc); 3728 3729 /* Force a complete recalibration on next init. */ 3730 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 3731 3732 /* Check that the error log address is valid. */ 3733 if (sc->errptr < IWN_FW_DATA_BASE || 3734 sc->errptr + sizeof (dump) > 3735 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 3736 printf("%s: bad firmware error log address 0x%08x\n", __func__, 3737 sc->errptr); 3738 return; 3739 } 3740 if (iwn_nic_lock(sc) != 0) { 3741 printf("%s: could not read firmware error log\n", __func__); 3742 return; 3743 } 3744 /* Read firmware error log from SRAM. */ 3745 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 3746 sizeof (dump) / sizeof (uint32_t)); 3747 iwn_nic_unlock(sc); 3748 3749 if (dump.valid == 0) { 3750 printf("%s: firmware error log is empty\n", __func__); 3751 return; 3752 } 3753 printf("firmware error log:\n"); 3754 printf(" error type = \"%s\" (0x%08X)\n", 3755 (dump.id < nitems(iwn_fw_errmsg)) ? 3756 iwn_fw_errmsg[dump.id] : "UNKNOWN", 3757 dump.id); 3758 printf(" program counter = 0x%08X\n", dump.pc); 3759 printf(" source line = 0x%08X\n", dump.src_line); 3760 printf(" error data = 0x%08X%08X\n", 3761 dump.error_data[0], dump.error_data[1]); 3762 printf(" branch link = 0x%08X%08X\n", 3763 dump.branch_link[0], dump.branch_link[1]); 3764 printf(" interrupt link = 0x%08X%08X\n", 3765 dump.interrupt_link[0], dump.interrupt_link[1]); 3766 printf(" time = %u\n", dump.time[0]); 3767 3768 /* Dump driver status (TX and RX rings) while we're here. */ 3769 printf("driver status:\n"); 3770 for (i = 0; i < sc->ntxqs; i++) { 3771 struct iwn_tx_ring *ring = &sc->txq[i]; 3772 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 3773 i, ring->qid, ring->cur, ring->queued); 3774 } 3775 printf(" rx ring: cur=%d\n", sc->rxq.cur); 3776 } 3777 3778 static void 3779 iwn_intr(void *arg) 3780 { 3781 struct iwn_softc *sc = arg; 3782 struct ifnet *ifp = sc->sc_ifp; 3783 uint32_t r1, r2, tmp; 3784 3785 IWN_LOCK(sc); 3786 3787 /* Disable interrupts. */ 3788 IWN_WRITE(sc, IWN_INT_MASK, 0); 3789 3790 /* Read interrupts from ICT (fast) or from registers (slow). */ 3791 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3792 tmp = 0; 3793 while (sc->ict[sc->ict_cur] != 0) { 3794 tmp |= sc->ict[sc->ict_cur]; 3795 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 3796 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 3797 } 3798 tmp = le32toh(tmp); 3799 if (tmp == 0xffffffff) /* Shouldn't happen. */ 3800 tmp = 0; 3801 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 3802 tmp |= 0x8000; 3803 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3804 r2 = 0; /* Unused. */ 3805 } else { 3806 r1 = IWN_READ(sc, IWN_INT); 3807 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3808 return; /* Hardware gone! */ 3809 r2 = IWN_READ(sc, IWN_FH_INT); 3810 } 3811 3812 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" 3813 , r1, r2); 3814 3815 if (r1 == 0 && r2 == 0) 3816 goto done; /* Interrupt not for us. */ 3817 3818 /* Acknowledge interrupts. */ 3819 IWN_WRITE(sc, IWN_INT, r1); 3820 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3821 IWN_WRITE(sc, IWN_FH_INT, r2); 3822 3823 if (r1 & IWN_INT_RF_TOGGLED) { 3824 iwn_rftoggle_intr(sc); 3825 goto done; 3826 } 3827 if (r1 & IWN_INT_CT_REACHED) { 3828 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 3829 __func__); 3830 } 3831 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3832 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 3833 __func__); 3834 #ifdef IWN_DEBUG 3835 iwn_debug_register(sc); 3836 #endif 3837 /* Dump firmware error log and stop. */ 3838 iwn_fatal_intr(sc); 3839 ifp->if_flags &= ~IFF_UP; 3840 iwn_stop_locked(sc); 3841 goto done; 3842 } 3843 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3844 (r2 & IWN_FH_INT_RX)) { 3845 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3846 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3847 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3848 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3849 IWN_INT_PERIODIC_DIS); 3850 iwn_notif_intr(sc); 3851 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 3852 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3853 IWN_INT_PERIODIC_ENA); 3854 } 3855 } else 3856 iwn_notif_intr(sc); 3857 } 3858 3859 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 3860 if (sc->sc_flags & IWN_FLAG_USE_ICT) 3861 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 3862 wakeup(sc); /* FH DMA transfer completed. */ 3863 } 3864 3865 if (r1 & IWN_INT_ALIVE) 3866 wakeup(sc); /* Firmware is alive. */ 3867 3868 if (r1 & IWN_INT_WAKEUP) 3869 iwn_wakeup_intr(sc); 3870 3871 done: 3872 /* Re-enable interrupts. */ 3873 if (ifp->if_flags & IFF_UP) 3874 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 3875 3876 IWN_UNLOCK(sc); 3877 } 3878 3879 /* 3880 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 3881 * 5000 adapters use a slightly different format). 3882 */ 3883 static void 3884 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3885 uint16_t len) 3886 { 3887 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3888 3889 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3890 3891 *w = htole16(len + 8); 3892 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3893 BUS_DMASYNC_PREWRITE); 3894 if (idx < IWN_SCHED_WINSZ) { 3895 *(w + IWN_TX_RING_COUNT) = *w; 3896 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3897 BUS_DMASYNC_PREWRITE); 3898 } 3899 } 3900 3901 static void 3902 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3903 uint16_t len) 3904 { 3905 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3906 3907 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3908 3909 *w = htole16(id << 12 | (len + 8)); 3910 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3911 BUS_DMASYNC_PREWRITE); 3912 if (idx < IWN_SCHED_WINSZ) { 3913 *(w + IWN_TX_RING_COUNT) = *w; 3914 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3915 BUS_DMASYNC_PREWRITE); 3916 } 3917 } 3918 3919 #ifdef notyet 3920 static void 3921 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3922 { 3923 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3924 3925 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3926 3927 *w = (*w & htole16(0xf000)) | htole16(1); 3928 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3929 BUS_DMASYNC_PREWRITE); 3930 if (idx < IWN_SCHED_WINSZ) { 3931 *(w + IWN_TX_RING_COUNT) = *w; 3932 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3933 BUS_DMASYNC_PREWRITE); 3934 } 3935 } 3936 #endif 3937 3938 /* 3939 * Check whether OFDM 11g protection will be enabled for the given rate. 3940 * 3941 * The original driver code only enabled protection for OFDM rates. 3942 * It didn't check to see whether it was operating in 11a or 11bg mode. 3943 */ 3944 static int 3945 iwn_check_rate_needs_protection(struct iwn_softc *sc, 3946 struct ieee80211vap *vap, uint8_t rate) 3947 { 3948 struct ieee80211com *ic = vap->iv_ic; 3949 3950 /* 3951 * Not in 2GHz mode? Then there's no need to enable OFDM 3952 * 11bg protection. 3953 */ 3954 if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { 3955 return (0); 3956 } 3957 3958 /* 3959 * 11bg protection not enabled? Then don't use it. 3960 */ 3961 if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0) 3962 return (0); 3963 3964 /* 3965 * If it's an 11n rate, then for now we enable 3966 * protection. 3967 */ 3968 if (rate & IEEE80211_RATE_MCS) { 3969 return (1); 3970 } 3971 3972 /* 3973 * Do a rate table lookup. If the PHY is CCK, 3974 * don't do protection. 3975 */ 3976 if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK) 3977 return (0); 3978 3979 /* 3980 * Yup, enable protection. 3981 */ 3982 return (1); 3983 } 3984 3985 /* 3986 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into 3987 * the link quality table that reflects this particular entry. 3988 */ 3989 static int 3990 iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni, 3991 uint8_t rate) 3992 { 3993 struct ieee80211_rateset *rs; 3994 int is_11n; 3995 int nr; 3996 int i; 3997 uint8_t cmp_rate; 3998 3999 /* 4000 * Figure out if we're using 11n or not here. 4001 */ 4002 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) 4003 is_11n = 1; 4004 else 4005 is_11n = 0; 4006 4007 /* 4008 * Use the correct rate table. 4009 */ 4010 if (is_11n) { 4011 rs = (struct ieee80211_rateset *) &ni->ni_htrates; 4012 nr = ni->ni_htrates.rs_nrates; 4013 } else { 4014 rs = &ni->ni_rates; 4015 nr = rs->rs_nrates; 4016 } 4017 4018 /* 4019 * Find the relevant link quality entry in the table. 4020 */ 4021 for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) { 4022 /* 4023 * The link quality table index starts at 0 == highest 4024 * rate, so we walk the rate table backwards. 4025 */ 4026 cmp_rate = rs->rs_rates[(nr - 1) - i]; 4027 if (rate & IEEE80211_RATE_MCS) 4028 cmp_rate |= IEEE80211_RATE_MCS; 4029 4030 #if 0 4031 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n", 4032 __func__, 4033 i, 4034 nr, 4035 rate, 4036 cmp_rate); 4037 #endif 4038 4039 if (cmp_rate == rate) 4040 return (i); 4041 } 4042 4043 /* Failed? Start at the end */ 4044 return (IWN_MAX_TX_RETRIES - 1); 4045 } 4046 4047 static int 4048 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 4049 { 4050 struct iwn_ops *ops = &sc->ops; 4051 const struct ieee80211_txparam *tp; 4052 struct ieee80211vap *vap = ni->ni_vap; 4053 struct ieee80211com *ic = ni->ni_ic; 4054 struct iwn_node *wn = (void *)ni; 4055 struct iwn_tx_ring *ring; 4056 struct iwn_tx_desc *desc; 4057 struct iwn_tx_data *data; 4058 struct iwn_tx_cmd *cmd; 4059 struct iwn_cmd_data *tx; 4060 struct ieee80211_frame *wh; 4061 struct ieee80211_key *k = NULL; 4062 struct mbuf *m1; 4063 uint32_t flags; 4064 uint16_t qos; 4065 u_int hdrlen; 4066 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 4067 uint8_t tid, type; 4068 int ac, i, totlen, error, pad, nsegs = 0, rate; 4069 4070 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4071 4072 IWN_LOCK_ASSERT(sc); 4073 4074 wh = mtod(m, struct ieee80211_frame *); 4075 hdrlen = ieee80211_anyhdrsize(wh); 4076 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4077 4078 /* Select EDCA Access Category and TX ring for this frame. */ 4079 if (IEEE80211_QOS_HAS_SEQ(wh)) { 4080 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 4081 tid = qos & IEEE80211_QOS_TID; 4082 } else { 4083 qos = 0; 4084 tid = 0; 4085 } 4086 ac = M_WME_GETAC(m); 4087 if (m->m_flags & M_AMPDU_MPDU) { 4088 uint16_t seqno; 4089 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac]; 4090 4091 if (!IEEE80211_AMPDU_RUNNING(tap)) { 4092 m_freem(m); 4093 return EINVAL; 4094 } 4095 4096 /* 4097 * Queue this frame to the hardware ring that we've 4098 * negotiated AMPDU TX on. 4099 * 4100 * Note that the sequence number must match the TX slot 4101 * being used! 4102 */ 4103 ac = *(int *)tap->txa_private; 4104 seqno = ni->ni_txseqs[tid]; 4105 *(uint16_t *)wh->i_seq = 4106 htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 4107 ring = &sc->txq[ac]; 4108 if ((seqno % 256) != ring->cur) { 4109 device_printf(sc->sc_dev, 4110 "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n", 4111 __func__, 4112 m, 4113 seqno, 4114 seqno % 256, 4115 ring->cur); 4116 } 4117 ni->ni_txseqs[tid]++; 4118 } 4119 ring = &sc->txq[ac]; 4120 desc = &ring->desc[ring->cur]; 4121 data = &ring->data[ring->cur]; 4122 4123 /* Choose a TX rate index. */ 4124 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 4125 if (type == IEEE80211_FC0_TYPE_MGT) 4126 rate = tp->mgmtrate; 4127 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 4128 rate = tp->mcastrate; 4129 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 4130 rate = tp->ucastrate; 4131 else if (m->m_flags & M_EAPOL) 4132 rate = tp->mgmtrate; 4133 else { 4134 /* XXX pass pktlen */ 4135 (void) ieee80211_ratectl_rate(ni, NULL, 0); 4136 rate = ni->ni_txrate; 4137 } 4138 4139 /* Encrypt the frame if need be. */ 4140 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 4141 /* Retrieve key for TX. */ 4142 k = ieee80211_crypto_encap(ni, m); 4143 if (k == NULL) { 4144 m_freem(m); 4145 return ENOBUFS; 4146 } 4147 /* 802.11 header may have moved. */ 4148 wh = mtod(m, struct ieee80211_frame *); 4149 } 4150 totlen = m->m_pkthdr.len; 4151 4152 if (ieee80211_radiotap_active_vap(vap)) { 4153 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4154 4155 tap->wt_flags = 0; 4156 tap->wt_rate = rate; 4157 if (k != NULL) 4158 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 4159 4160 ieee80211_radiotap_tx(vap, m); 4161 } 4162 4163 /* Prepare TX firmware command. */ 4164 cmd = &ring->cmd[ring->cur]; 4165 cmd->code = IWN_CMD_TX_DATA; 4166 cmd->flags = 0; 4167 cmd->qid = ring->qid; 4168 cmd->idx = ring->cur; 4169 4170 tx = (struct iwn_cmd_data *)cmd->data; 4171 /* NB: No need to clear tx, all fields are reinitialized here. */ 4172 tx->scratch = 0; /* clear "scratch" area */ 4173 4174 flags = 0; 4175 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4176 /* Unicast frame, check if an ACK is expected. */ 4177 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 4178 IEEE80211_QOS_ACKPOLICY_NOACK) 4179 flags |= IWN_TX_NEED_ACK; 4180 } 4181 if ((wh->i_fc[0] & 4182 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 4183 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 4184 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 4185 4186 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 4187 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 4188 4189 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 4190 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4191 /* NB: Group frames are sent using CCK in 802.11b/g. */ 4192 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 4193 flags |= IWN_TX_NEED_RTS; 4194 } else if (iwn_check_rate_needs_protection(sc, vap, rate)) { 4195 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 4196 flags |= IWN_TX_NEED_CTS; 4197 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 4198 flags |= IWN_TX_NEED_RTS; 4199 } 4200 4201 /* XXX HT protection? */ 4202 4203 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 4204 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4205 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4206 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 4207 flags |= IWN_TX_NEED_PROTECTION; 4208 } else 4209 flags |= IWN_TX_FULL_TXOP; 4210 } 4211 } 4212 4213 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 4214 type != IEEE80211_FC0_TYPE_DATA) 4215 tx->id = sc->broadcast_id; 4216 else 4217 tx->id = wn->id; 4218 4219 if (type == IEEE80211_FC0_TYPE_MGT) { 4220 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4221 4222 /* Tell HW to set timestamp in probe responses. */ 4223 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4224 flags |= IWN_TX_INSERT_TSTAMP; 4225 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4226 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4227 tx->timeout = htole16(3); 4228 else 4229 tx->timeout = htole16(2); 4230 } else 4231 tx->timeout = htole16(0); 4232 4233 if (hdrlen & 3) { 4234 /* First segment length must be a multiple of 4. */ 4235 flags |= IWN_TX_NEED_PADDING; 4236 pad = 4 - (hdrlen & 3); 4237 } else 4238 pad = 0; 4239 4240 tx->len = htole16(totlen); 4241 tx->tid = tid; 4242 tx->rts_ntries = 60; 4243 tx->data_ntries = 15; 4244 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4245 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4246 if (tx->id == sc->broadcast_id) { 4247 /* Group or management frame. */ 4248 tx->linkq = 0; 4249 } else { 4250 tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate); 4251 flags |= IWN_TX_LINKQ; /* enable MRR */ 4252 } 4253 4254 /* Set physical address of "scratch area". */ 4255 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4256 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4257 4258 /* Copy 802.11 header in TX command. */ 4259 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4260 4261 /* Trim 802.11 header. */ 4262 m_adj(m, hdrlen); 4263 tx->security = 0; 4264 tx->flags = htole32(flags); 4265 4266 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 4267 &nsegs, BUS_DMA_NOWAIT); 4268 if (error != 0) { 4269 if (error != EFBIG) { 4270 device_printf(sc->sc_dev, 4271 "%s: can't map mbuf (error %d)\n", __func__, error); 4272 m_freem(m); 4273 return error; 4274 } 4275 /* Too many DMA segments, linearize mbuf. */ 4276 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER); 4277 if (m1 == NULL) { 4278 device_printf(sc->sc_dev, 4279 "%s: could not defrag mbuf\n", __func__); 4280 m_freem(m); 4281 return ENOBUFS; 4282 } 4283 m = m1; 4284 4285 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 4286 segs, &nsegs, BUS_DMA_NOWAIT); 4287 if (error != 0) { 4288 device_printf(sc->sc_dev, 4289 "%s: can't map mbuf (error %d)\n", __func__, error); 4290 m_freem(m); 4291 return error; 4292 } 4293 } 4294 4295 data->m = m; 4296 data->ni = ni; 4297 4298 DPRINTF(sc, IWN_DEBUG_XMIT, 4299 "%s: qid %d idx %d len %d nsegs %d rate %04x plcp 0x%08x\n", 4300 __func__, 4301 ring->qid, 4302 ring->cur, 4303 m->m_pkthdr.len, 4304 nsegs, 4305 rate, 4306 tx->rate); 4307 4308 /* Fill TX descriptor. */ 4309 desc->nsegs = 1; 4310 if (m->m_len != 0) 4311 desc->nsegs += nsegs; 4312 /* First DMA segment is used by the TX command. */ 4313 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4314 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4315 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4316 /* Other DMA segments are for data payload. */ 4317 seg = &segs[0]; 4318 for (i = 1; i <= nsegs; i++) { 4319 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4320 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4321 seg->ds_len << 4); 4322 seg++; 4323 } 4324 4325 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4326 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4327 BUS_DMASYNC_PREWRITE); 4328 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4329 BUS_DMASYNC_PREWRITE); 4330 4331 /* Update TX scheduler. */ 4332 if (ring->qid >= sc->firstaggqueue) 4333 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4334 4335 /* Kick TX ring. */ 4336 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4337 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4338 4339 /* Mark TX ring as full if we reach a certain threshold. */ 4340 if (++ring->queued > IWN_TX_RING_HIMARK) 4341 sc->qfullmsk |= 1 << ring->qid; 4342 4343 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4344 4345 return 0; 4346 } 4347 4348 static int 4349 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 4350 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 4351 { 4352 struct iwn_ops *ops = &sc->ops; 4353 // struct ifnet *ifp = sc->sc_ifp; 4354 struct ieee80211vap *vap = ni->ni_vap; 4355 // struct ieee80211com *ic = ifp->if_l2com; 4356 struct iwn_tx_cmd *cmd; 4357 struct iwn_cmd_data *tx; 4358 struct ieee80211_frame *wh; 4359 struct iwn_tx_ring *ring; 4360 struct iwn_tx_desc *desc; 4361 struct iwn_tx_data *data; 4362 struct mbuf *m1; 4363 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 4364 uint32_t flags; 4365 u_int hdrlen; 4366 int ac, totlen, error, pad, nsegs = 0, i, rate; 4367 uint8_t type; 4368 4369 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4370 4371 IWN_LOCK_ASSERT(sc); 4372 4373 wh = mtod(m, struct ieee80211_frame *); 4374 hdrlen = ieee80211_anyhdrsize(wh); 4375 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4376 4377 ac = params->ibp_pri & 3; 4378 4379 ring = &sc->txq[ac]; 4380 desc = &ring->desc[ring->cur]; 4381 data = &ring->data[ring->cur]; 4382 4383 /* Choose a TX rate. */ 4384 rate = params->ibp_rate0; 4385 totlen = m->m_pkthdr.len; 4386 4387 /* Prepare TX firmware command. */ 4388 cmd = &ring->cmd[ring->cur]; 4389 cmd->code = IWN_CMD_TX_DATA; 4390 cmd->flags = 0; 4391 cmd->qid = ring->qid; 4392 cmd->idx = ring->cur; 4393 4394 tx = (struct iwn_cmd_data *)cmd->data; 4395 /* NB: No need to clear tx, all fields are reinitialized here. */ 4396 tx->scratch = 0; /* clear "scratch" area */ 4397 4398 flags = 0; 4399 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 4400 flags |= IWN_TX_NEED_ACK; 4401 if (params->ibp_flags & IEEE80211_BPF_RTS) { 4402 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4403 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4404 flags &= ~IWN_TX_NEED_RTS; 4405 flags |= IWN_TX_NEED_PROTECTION; 4406 } else 4407 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 4408 } 4409 if (params->ibp_flags & IEEE80211_BPF_CTS) { 4410 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4411 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4412 flags &= ~IWN_TX_NEED_CTS; 4413 flags |= IWN_TX_NEED_PROTECTION; 4414 } else 4415 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 4416 } 4417 if (type == IEEE80211_FC0_TYPE_MGT) { 4418 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4419 4420 /* Tell HW to set timestamp in probe responses. */ 4421 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4422 flags |= IWN_TX_INSERT_TSTAMP; 4423 4424 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4425 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4426 tx->timeout = htole16(3); 4427 else 4428 tx->timeout = htole16(2); 4429 } else 4430 tx->timeout = htole16(0); 4431 4432 if (hdrlen & 3) { 4433 /* First segment length must be a multiple of 4. */ 4434 flags |= IWN_TX_NEED_PADDING; 4435 pad = 4 - (hdrlen & 3); 4436 } else 4437 pad = 0; 4438 4439 if (ieee80211_radiotap_active_vap(vap)) { 4440 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4441 4442 tap->wt_flags = 0; 4443 tap->wt_rate = rate; 4444 4445 ieee80211_radiotap_tx(vap, m); 4446 } 4447 4448 tx->len = htole16(totlen); 4449 tx->tid = 0; 4450 tx->id = sc->broadcast_id; 4451 tx->rts_ntries = params->ibp_try1; 4452 tx->data_ntries = params->ibp_try0; 4453 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4454 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4455 4456 /* Group or management frame. */ 4457 tx->linkq = 0; 4458 4459 /* Set physical address of "scratch area". */ 4460 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4461 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4462 4463 /* Copy 802.11 header in TX command. */ 4464 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4465 4466 /* Trim 802.11 header. */ 4467 m_adj(m, hdrlen); 4468 tx->security = 0; 4469 tx->flags = htole32(flags); 4470 4471 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 4472 &nsegs, BUS_DMA_NOWAIT); 4473 if (error != 0) { 4474 if (error != EFBIG) { 4475 device_printf(sc->sc_dev, 4476 "%s: can't map mbuf (error %d)\n", __func__, error); 4477 m_freem(m); 4478 return error; 4479 } 4480 /* Too many DMA segments, linearize mbuf. */ 4481 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER); 4482 if (m1 == NULL) { 4483 device_printf(sc->sc_dev, 4484 "%s: could not defrag mbuf\n", __func__); 4485 m_freem(m); 4486 return ENOBUFS; 4487 } 4488 m = m1; 4489 4490 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 4491 segs, &nsegs, BUS_DMA_NOWAIT); 4492 if (error != 0) { 4493 device_printf(sc->sc_dev, 4494 "%s: can't map mbuf (error %d)\n", __func__, error); 4495 m_freem(m); 4496 return error; 4497 } 4498 } 4499 4500 data->m = m; 4501 data->ni = ni; 4502 4503 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 4504 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 4505 4506 /* Fill TX descriptor. */ 4507 desc->nsegs = 1; 4508 if (m->m_len != 0) 4509 desc->nsegs += nsegs; 4510 /* First DMA segment is used by the TX command. */ 4511 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4512 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4513 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4514 /* Other DMA segments are for data payload. */ 4515 seg = &segs[0]; 4516 for (i = 1; i <= nsegs; i++) { 4517 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4518 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4519 seg->ds_len << 4); 4520 seg++; 4521 } 4522 4523 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4524 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4525 BUS_DMASYNC_PREWRITE); 4526 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4527 BUS_DMASYNC_PREWRITE); 4528 4529 /* Update TX scheduler. */ 4530 if (ring->qid >= sc->firstaggqueue) 4531 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4532 4533 /* Kick TX ring. */ 4534 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4535 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4536 4537 /* Mark TX ring as full if we reach a certain threshold. */ 4538 if (++ring->queued > IWN_TX_RING_HIMARK) 4539 sc->qfullmsk |= 1 << ring->qid; 4540 4541 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4542 4543 return 0; 4544 } 4545 4546 static int 4547 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 4548 const struct ieee80211_bpf_params *params) 4549 { 4550 struct ieee80211com *ic = ni->ni_ic; 4551 struct ifnet *ifp = ic->ic_ifp; 4552 struct iwn_softc *sc = ifp->if_softc; 4553 int error = 0; 4554 4555 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4556 4557 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 4558 ieee80211_free_node(ni); 4559 m_freem(m); 4560 return ENETDOWN; 4561 } 4562 4563 IWN_LOCK(sc); 4564 if (params == NULL) { 4565 /* 4566 * Legacy path; interpret frame contents to decide 4567 * precisely how to send the frame. 4568 */ 4569 error = iwn_tx_data(sc, m, ni); 4570 } else { 4571 /* 4572 * Caller supplied explicit parameters to use in 4573 * sending the frame. 4574 */ 4575 error = iwn_tx_data_raw(sc, m, ni, params); 4576 } 4577 if (error != 0) { 4578 /* NB: m is reclaimed on tx failure */ 4579 ieee80211_free_node(ni); 4580 ifp->if_oerrors++; 4581 } 4582 sc->sc_tx_timer = 5; 4583 4584 IWN_UNLOCK(sc); 4585 4586 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4587 4588 return error; 4589 } 4590 4591 static void 4592 iwn_start(struct ifnet *ifp) 4593 { 4594 struct iwn_softc *sc = ifp->if_softc; 4595 4596 IWN_LOCK(sc); 4597 iwn_start_locked(ifp); 4598 IWN_UNLOCK(sc); 4599 } 4600 4601 static void 4602 iwn_start_locked(struct ifnet *ifp) 4603 { 4604 struct iwn_softc *sc = ifp->if_softc; 4605 struct ieee80211_node *ni; 4606 struct mbuf *m; 4607 4608 IWN_LOCK_ASSERT(sc); 4609 4610 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 4611 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) 4612 return; 4613 4614 for (;;) { 4615 if (sc->qfullmsk != 0) { 4616 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 4617 break; 4618 } 4619 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 4620 if (m == NULL) 4621 break; 4622 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4623 if (iwn_tx_data(sc, m, ni) != 0) { 4624 ieee80211_free_node(ni); 4625 ifp->if_oerrors++; 4626 continue; 4627 } 4628 sc->sc_tx_timer = 5; 4629 } 4630 } 4631 4632 static void 4633 iwn_watchdog(void *arg) 4634 { 4635 struct iwn_softc *sc = arg; 4636 struct ifnet *ifp = sc->sc_ifp; 4637 struct ieee80211com *ic = ifp->if_l2com; 4638 4639 IWN_LOCK_ASSERT(sc); 4640 4641 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); 4642 4643 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4644 4645 if (sc->sc_tx_timer > 0) { 4646 if (--sc->sc_tx_timer == 0) { 4647 if_printf(ifp, "device timeout\n"); 4648 ieee80211_runtask(ic, &sc->sc_reinit_task); 4649 return; 4650 } 4651 } 4652 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 4653 } 4654 4655 static int 4656 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 4657 { 4658 struct iwn_softc *sc = ifp->if_softc; 4659 struct ieee80211com *ic = ifp->if_l2com; 4660 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4661 struct ifreq *ifr = (struct ifreq *) data; 4662 int error = 0, startall = 0, stop = 0; 4663 4664 switch (cmd) { 4665 case SIOCGIFADDR: 4666 error = ether_ioctl(ifp, cmd, data); 4667 break; 4668 case SIOCSIFFLAGS: 4669 IWN_LOCK(sc); 4670 if (ifp->if_flags & IFF_UP) { 4671 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4672 iwn_init_locked(sc); 4673 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 4674 startall = 1; 4675 else 4676 stop = 1; 4677 } 4678 } else { 4679 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4680 iwn_stop_locked(sc); 4681 } 4682 IWN_UNLOCK(sc); 4683 if (startall) 4684 ieee80211_start_all(ic); 4685 else if (vap != NULL && stop) 4686 ieee80211_stop(vap); 4687 break; 4688 case SIOCGIFMEDIA: 4689 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 4690 break; 4691 default: 4692 error = EINVAL; 4693 break; 4694 } 4695 return error; 4696 } 4697 4698 /* 4699 * Send a command to the firmware. 4700 */ 4701 static int 4702 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 4703 { 4704 struct iwn_tx_ring *ring; 4705 struct iwn_tx_desc *desc; 4706 struct iwn_tx_data *data; 4707 struct iwn_tx_cmd *cmd; 4708 struct mbuf *m; 4709 bus_addr_t paddr; 4710 int totlen, error; 4711 int cmd_queue_num; 4712 4713 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4714 4715 if (async == 0) 4716 IWN_LOCK_ASSERT(sc); 4717 4718 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 4719 cmd_queue_num = IWN_PAN_CMD_QUEUE; 4720 else 4721 cmd_queue_num = IWN_CMD_QUEUE_NUM; 4722 4723 ring = &sc->txq[cmd_queue_num]; 4724 desc = &ring->desc[ring->cur]; 4725 data = &ring->data[ring->cur]; 4726 totlen = 4 + size; 4727 4728 if (size > sizeof cmd->data) { 4729 /* Command is too large to fit in a descriptor. */ 4730 if (totlen > MCLBYTES) 4731 return EINVAL; 4732 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 4733 if (m == NULL) 4734 return ENOMEM; 4735 cmd = mtod(m, struct iwn_tx_cmd *); 4736 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 4737 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 4738 if (error != 0) { 4739 m_freem(m); 4740 return error; 4741 } 4742 data->m = m; 4743 } else { 4744 cmd = &ring->cmd[ring->cur]; 4745 paddr = data->cmd_paddr; 4746 } 4747 4748 cmd->code = code; 4749 cmd->flags = 0; 4750 cmd->qid = ring->qid; 4751 cmd->idx = ring->cur; 4752 memcpy(cmd->data, buf, size); 4753 4754 desc->nsegs = 1; 4755 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 4756 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 4757 4758 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 4759 __func__, iwn_intr_str(cmd->code), cmd->code, 4760 cmd->flags, cmd->qid, cmd->idx); 4761 4762 if (size > sizeof cmd->data) { 4763 bus_dmamap_sync(ring->data_dmat, data->map, 4764 BUS_DMASYNC_PREWRITE); 4765 } else { 4766 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4767 BUS_DMASYNC_PREWRITE); 4768 } 4769 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4770 BUS_DMASYNC_PREWRITE); 4771 4772 /* Kick command ring. */ 4773 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4774 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4775 4776 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4777 4778 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 4779 } 4780 4781 static int 4782 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 4783 { 4784 struct iwn4965_node_info hnode; 4785 caddr_t src, dst; 4786 4787 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4788 4789 /* 4790 * We use the node structure for 5000 Series internally (it is 4791 * a superset of the one for 4965AGN). We thus copy the common 4792 * fields before sending the command. 4793 */ 4794 src = (caddr_t)node; 4795 dst = (caddr_t)&hnode; 4796 memcpy(dst, src, 48); 4797 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 4798 memcpy(dst + 48, src + 72, 20); 4799 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 4800 } 4801 4802 static int 4803 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 4804 { 4805 4806 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4807 4808 /* Direct mapping. */ 4809 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 4810 } 4811 4812 static int 4813 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 4814 { 4815 #define RV(v) ((v) & IEEE80211_RATE_VAL) 4816 struct iwn_node *wn = (void *)ni; 4817 struct ieee80211_rateset *rs; 4818 struct iwn_cmd_link_quality linkq; 4819 uint8_t txant; 4820 int i, rate, txrate; 4821 int is_11n; 4822 4823 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4824 4825 /* Use the first valid TX antenna. */ 4826 txant = IWN_LSB(sc->txchainmask); 4827 4828 memset(&linkq, 0, sizeof linkq); 4829 linkq.id = wn->id; 4830 linkq.antmsk_1stream = txant; 4831 4832 /* 4833 * The '2 stream' setup is a bit .. odd. 4834 * 4835 * For NICs that support only 1 antenna, default to IWN_ANT_AB or 4836 * the firmware panics (eg Intel 5100.) 4837 * 4838 * For NICs that support two antennas, we use ANT_AB. 4839 * 4840 * For NICs that support three antennas, we use the two that 4841 * wasn't the default one. 4842 * 4843 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict 4844 * this to only one antenna. 4845 */ 4846 4847 /* So - if there's no secondary antenna, assume IWN_ANT_AB */ 4848 4849 /* Default - transmit on the other antennas */ 4850 linkq.antmsk_2stream = (sc->txchainmask & ~IWN_LSB(sc->txchainmask)); 4851 4852 /* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */ 4853 if (linkq.antmsk_2stream == 0) 4854 linkq.antmsk_2stream = IWN_ANT_AB; 4855 4856 /* 4857 * If the NIC is a two-stream TX NIC, configure the TX mask to 4858 * the default chainmask 4859 */ 4860 else if (sc->ntxchains == 2) 4861 linkq.antmsk_2stream = sc->txchainmask; 4862 4863 linkq.ampdu_max = 32; /* XXX negotiated? */ 4864 linkq.ampdu_threshold = 3; 4865 linkq.ampdu_limit = htole16(4000); /* 4ms */ 4866 4867 DPRINTF(sc, IWN_DEBUG_XMIT, 4868 "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n", 4869 __func__, 4870 linkq.antmsk_1stream, 4871 linkq.antmsk_2stream, 4872 sc->ntxchains); 4873 4874 /* 4875 * Are we using 11n rates? Ensure the channel is 4876 * 11n _and_ we have some 11n rates, or don't 4877 * try. 4878 */ 4879 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) { 4880 rs = (struct ieee80211_rateset *) &ni->ni_htrates; 4881 is_11n = 1; 4882 } else { 4883 rs = &ni->ni_rates; 4884 is_11n = 0; 4885 } 4886 4887 /* Start at highest available bit-rate. */ 4888 /* 4889 * XXX this is all very dirty! 4890 */ 4891 if (is_11n) 4892 txrate = ni->ni_htrates.rs_nrates - 1; 4893 else 4894 txrate = rs->rs_nrates - 1; 4895 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 4896 uint32_t plcp; 4897 4898 if (is_11n) 4899 rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate]; 4900 else 4901 rate = RV(rs->rs_rates[txrate]); 4902 4903 DPRINTF(sc, IWN_DEBUG_XMIT, 4904 "%s: i=%d, txrate=%d, rate=0x%02x\n", 4905 __func__, 4906 i, 4907 txrate, 4908 rate); 4909 4910 /* Do rate -> PLCP config mapping */ 4911 plcp = iwn_rate_to_plcp(sc, ni, rate); 4912 linkq.retry[i] = plcp; 4913 4914 /* 4915 * The mimo field is an index into the table which 4916 * indicates the first index where it and subsequent entries 4917 * will not be using MIMO. 4918 * 4919 * Since we're filling linkq from 0..15 and we're filling 4920 * from the higest MCS rates to the lowest rates, if we 4921 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie, 4922 * the next entry.) That way if the next entry is a non-MIMO 4923 * entry, we're already pointing at it. 4924 */ 4925 if ((le32toh(plcp) & IWN_RFLAG_MCS) && 4926 RV(le32toh(plcp)) > 7) 4927 linkq.mimo = i + 1; 4928 4929 /* Next retry at immediate lower bit-rate. */ 4930 if (txrate > 0) 4931 txrate--; 4932 } 4933 4934 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4935 4936 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 4937 #undef RV 4938 } 4939 4940 /* 4941 * Broadcast node is used to send group-addressed and management frames. 4942 */ 4943 static int 4944 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 4945 { 4946 struct iwn_ops *ops = &sc->ops; 4947 struct ifnet *ifp = sc->sc_ifp; 4948 struct ieee80211com *ic = ifp->if_l2com; 4949 struct iwn_node_info node; 4950 struct iwn_cmd_link_quality linkq; 4951 uint8_t txant; 4952 int i, error; 4953 4954 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4955 4956 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 4957 4958 memset(&node, 0, sizeof node); 4959 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 4960 node.id = sc->broadcast_id; 4961 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); 4962 if ((error = ops->add_node(sc, &node, async)) != 0) 4963 return error; 4964 4965 /* Use the first valid TX antenna. */ 4966 txant = IWN_LSB(sc->txchainmask); 4967 4968 memset(&linkq, 0, sizeof linkq); 4969 linkq.id = sc->broadcast_id; 4970 linkq.antmsk_1stream = txant; 4971 linkq.antmsk_2stream = IWN_ANT_AB; 4972 linkq.ampdu_max = 64; 4973 linkq.ampdu_threshold = 3; 4974 linkq.ampdu_limit = htole16(4000); /* 4ms */ 4975 4976 /* Use lowest mandatory bit-rate. */ 4977 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) 4978 linkq.retry[0] = htole32(0xd); 4979 else 4980 linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK); 4981 linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant)); 4982 /* Use same bit-rate for all TX retries. */ 4983 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 4984 linkq.retry[i] = linkq.retry[0]; 4985 } 4986 4987 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4988 4989 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 4990 } 4991 4992 static int 4993 iwn_updateedca(struct ieee80211com *ic) 4994 { 4995 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 4996 struct iwn_softc *sc = ic->ic_ifp->if_softc; 4997 struct iwn_edca_params cmd; 4998 int aci; 4999 5000 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5001 5002 memset(&cmd, 0, sizeof cmd); 5003 cmd.flags = htole32(IWN_EDCA_UPDATE); 5004 for (aci = 0; aci < WME_NUM_AC; aci++) { 5005 const struct wmeParams *ac = 5006 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 5007 cmd.ac[aci].aifsn = ac->wmep_aifsn; 5008 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin)); 5009 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax)); 5010 cmd.ac[aci].txoplimit = 5011 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 5012 } 5013 IEEE80211_UNLOCK(ic); 5014 IWN_LOCK(sc); 5015 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 5016 IWN_UNLOCK(sc); 5017 IEEE80211_LOCK(ic); 5018 5019 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5020 5021 return 0; 5022 #undef IWN_EXP2 5023 } 5024 5025 static void 5026 iwn_update_mcast(struct ifnet *ifp) 5027 { 5028 /* Ignore */ 5029 } 5030 5031 static void 5032 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 5033 { 5034 struct iwn_cmd_led led; 5035 5036 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5037 5038 #if 0 5039 /* XXX don't set LEDs during scan? */ 5040 if (sc->sc_is_scanning) 5041 return; 5042 #endif 5043 5044 /* Clear microcode LED ownership. */ 5045 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 5046 5047 led.which = which; 5048 led.unit = htole32(10000); /* on/off in unit of 100ms */ 5049 led.off = off; 5050 led.on = on; 5051 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 5052 } 5053 5054 /* 5055 * Set the critical temperature at which the firmware will stop the radio 5056 * and notify us. 5057 */ 5058 static int 5059 iwn_set_critical_temp(struct iwn_softc *sc) 5060 { 5061 struct iwn_critical_temp crit; 5062 int32_t temp; 5063 5064 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5065 5066 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 5067 5068 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 5069 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 5070 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 5071 temp = IWN_CTOK(110); 5072 else 5073 temp = 110; 5074 memset(&crit, 0, sizeof crit); 5075 crit.tempR = htole32(temp); 5076 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp); 5077 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 5078 } 5079 5080 static int 5081 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 5082 { 5083 struct iwn_cmd_timing cmd; 5084 uint64_t val, mod; 5085 5086 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5087 5088 memset(&cmd, 0, sizeof cmd); 5089 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 5090 cmd.bintval = htole16(ni->ni_intval); 5091 cmd.lintval = htole16(10); 5092 5093 /* Compute remaining time until next beacon. */ 5094 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 5095 mod = le64toh(cmd.tstamp) % val; 5096 cmd.binitval = htole32((uint32_t)(val - mod)); 5097 5098 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 5099 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 5100 5101 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 5102 } 5103 5104 static void 5105 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 5106 { 5107 struct ifnet *ifp = sc->sc_ifp; 5108 struct ieee80211com *ic = ifp->if_l2com; 5109 5110 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5111 5112 /* Adjust TX power if need be (delta >= 3 degC). */ 5113 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 5114 __func__, sc->temp, temp); 5115 if (abs(temp - sc->temp) >= 3) { 5116 /* Record temperature of last calibration. */ 5117 sc->temp = temp; 5118 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 5119 } 5120 } 5121 5122 /* 5123 * Set TX power for current channel (each rate has its own power settings). 5124 * This function takes into account the regulatory information from EEPROM, 5125 * the current temperature and the current voltage. 5126 */ 5127 static int 5128 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 5129 int async) 5130 { 5131 /* Fixed-point arithmetic division using a n-bit fractional part. */ 5132 #define fdivround(a, b, n) \ 5133 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 5134 /* Linear interpolation. */ 5135 #define interpolate(x, x1, y1, x2, y2, n) \ 5136 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 5137 5138 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 5139 struct iwn_ucode_info *uc = &sc->ucode_info; 5140 struct iwn4965_cmd_txpower cmd; 5141 struct iwn4965_eeprom_chan_samples *chans; 5142 const uint8_t *rf_gain, *dsp_gain; 5143 int32_t vdiff, tdiff; 5144 int i, c, grp, maxpwr; 5145 uint8_t chan; 5146 5147 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 5148 /* Retrieve current channel from last RXON. */ 5149 chan = sc->rxon->chan; 5150 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", 5151 chan); 5152 5153 memset(&cmd, 0, sizeof cmd); 5154 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 5155 cmd.chan = chan; 5156 5157 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 5158 maxpwr = sc->maxpwr5GHz; 5159 rf_gain = iwn4965_rf_gain_5ghz; 5160 dsp_gain = iwn4965_dsp_gain_5ghz; 5161 } else { 5162 maxpwr = sc->maxpwr2GHz; 5163 rf_gain = iwn4965_rf_gain_2ghz; 5164 dsp_gain = iwn4965_dsp_gain_2ghz; 5165 } 5166 5167 /* Compute voltage compensation. */ 5168 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 5169 if (vdiff > 0) 5170 vdiff *= 2; 5171 if (abs(vdiff) > 2) 5172 vdiff = 0; 5173 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5174 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 5175 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 5176 5177 /* Get channel attenuation group. */ 5178 if (chan <= 20) /* 1-20 */ 5179 grp = 4; 5180 else if (chan <= 43) /* 34-43 */ 5181 grp = 0; 5182 else if (chan <= 70) /* 44-70 */ 5183 grp = 1; 5184 else if (chan <= 124) /* 71-124 */ 5185 grp = 2; 5186 else /* 125-200 */ 5187 grp = 3; 5188 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5189 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 5190 5191 /* Get channel sub-band. */ 5192 for (i = 0; i < IWN_NBANDS; i++) 5193 if (sc->bands[i].lo != 0 && 5194 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 5195 break; 5196 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 5197 return EINVAL; 5198 chans = sc->bands[i].chans; 5199 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5200 "%s: chan %d sub-band=%d\n", __func__, chan, i); 5201 5202 for (c = 0; c < 2; c++) { 5203 uint8_t power, gain, temp; 5204 int maxchpwr, pwr, ridx, idx; 5205 5206 power = interpolate(chan, 5207 chans[0].num, chans[0].samples[c][1].power, 5208 chans[1].num, chans[1].samples[c][1].power, 1); 5209 gain = interpolate(chan, 5210 chans[0].num, chans[0].samples[c][1].gain, 5211 chans[1].num, chans[1].samples[c][1].gain, 1); 5212 temp = interpolate(chan, 5213 chans[0].num, chans[0].samples[c][1].temp, 5214 chans[1].num, chans[1].samples[c][1].temp, 1); 5215 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5216 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 5217 __func__, c, power, gain, temp); 5218 5219 /* Compute temperature compensation. */ 5220 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 5221 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5222 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 5223 __func__, tdiff, sc->temp, temp); 5224 5225 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 5226 /* Convert dBm to half-dBm. */ 5227 maxchpwr = sc->maxpwr[chan] * 2; 5228 if ((ridx / 8) & 1) 5229 maxchpwr -= 6; /* MIMO 2T: -3dB */ 5230 5231 pwr = maxpwr; 5232 5233 /* Adjust TX power based on rate. */ 5234 if ((ridx % 8) == 5) 5235 pwr -= 15; /* OFDM48: -7.5dB */ 5236 else if ((ridx % 8) == 6) 5237 pwr -= 17; /* OFDM54: -8.5dB */ 5238 else if ((ridx % 8) == 7) 5239 pwr -= 20; /* OFDM60: -10dB */ 5240 else 5241 pwr -= 10; /* Others: -5dB */ 5242 5243 /* Do not exceed channel max TX power. */ 5244 if (pwr > maxchpwr) 5245 pwr = maxchpwr; 5246 5247 idx = gain - (pwr - power) - tdiff - vdiff; 5248 if ((ridx / 8) & 1) /* MIMO */ 5249 idx += (int32_t)le32toh(uc->atten[grp][c]); 5250 5251 if (cmd.band == 0) 5252 idx += 9; /* 5GHz */ 5253 if (ridx == IWN_RIDX_MAX) 5254 idx += 5; /* CCK */ 5255 5256 /* Make sure idx stays in a valid range. */ 5257 if (idx < 0) 5258 idx = 0; 5259 else if (idx > IWN4965_MAX_PWR_INDEX) 5260 idx = IWN4965_MAX_PWR_INDEX; 5261 5262 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5263 "%s: Tx chain %d, rate idx %d: power=%d\n", 5264 __func__, c, ridx, idx); 5265 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 5266 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 5267 } 5268 } 5269 5270 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 5271 "%s: set tx power for chan %d\n", __func__, chan); 5272 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 5273 5274 #undef interpolate 5275 #undef fdivround 5276 } 5277 5278 static int 5279 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 5280 int async) 5281 { 5282 struct iwn5000_cmd_txpower cmd; 5283 5284 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5285 5286 /* 5287 * TX power calibration is handled automatically by the firmware 5288 * for 5000 Series. 5289 */ 5290 memset(&cmd, 0, sizeof cmd); 5291 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 5292 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 5293 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 5294 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); 5295 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 5296 } 5297 5298 /* 5299 * Retrieve the maximum RSSI (in dBm) among receivers. 5300 */ 5301 static int 5302 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 5303 { 5304 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 5305 uint8_t mask, agc; 5306 int rssi; 5307 5308 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5309 5310 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 5311 agc = (le16toh(phy->agc) >> 7) & 0x7f; 5312 5313 rssi = 0; 5314 if (mask & IWN_ANT_A) 5315 rssi = MAX(rssi, phy->rssi[0]); 5316 if (mask & IWN_ANT_B) 5317 rssi = MAX(rssi, phy->rssi[2]); 5318 if (mask & IWN_ANT_C) 5319 rssi = MAX(rssi, phy->rssi[4]); 5320 5321 DPRINTF(sc, IWN_DEBUG_RECV, 5322 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc, 5323 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4], 5324 rssi - agc - IWN_RSSI_TO_DBM); 5325 return rssi - agc - IWN_RSSI_TO_DBM; 5326 } 5327 5328 static int 5329 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 5330 { 5331 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 5332 uint8_t agc; 5333 int rssi; 5334 5335 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5336 5337 agc = (le32toh(phy->agc) >> 9) & 0x7f; 5338 5339 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 5340 le16toh(phy->rssi[1]) & 0xff); 5341 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 5342 5343 DPRINTF(sc, IWN_DEBUG_RECV, 5344 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc, 5345 phy->rssi[0], phy->rssi[1], phy->rssi[2], 5346 rssi - agc - IWN_RSSI_TO_DBM); 5347 return rssi - agc - IWN_RSSI_TO_DBM; 5348 } 5349 5350 /* 5351 * Retrieve the average noise (in dBm) among receivers. 5352 */ 5353 static int 5354 iwn_get_noise(const struct iwn_rx_general_stats *stats) 5355 { 5356 int i, total, nbant, noise; 5357 5358 total = nbant = 0; 5359 for (i = 0; i < 3; i++) { 5360 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 5361 continue; 5362 total += noise; 5363 nbant++; 5364 } 5365 /* There should be at least one antenna but check anyway. */ 5366 return (nbant == 0) ? -127 : (total / nbant) - 107; 5367 } 5368 5369 /* 5370 * Compute temperature (in degC) from last received statistics. 5371 */ 5372 static int 5373 iwn4965_get_temperature(struct iwn_softc *sc) 5374 { 5375 struct iwn_ucode_info *uc = &sc->ucode_info; 5376 int32_t r1, r2, r3, r4, temp; 5377 5378 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5379 5380 r1 = le32toh(uc->temp[0].chan20MHz); 5381 r2 = le32toh(uc->temp[1].chan20MHz); 5382 r3 = le32toh(uc->temp[2].chan20MHz); 5383 r4 = le32toh(sc->rawtemp); 5384 5385 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 5386 return 0; 5387 5388 /* Sign-extend 23-bit R4 value to 32-bit. */ 5389 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 5390 /* Compute temperature in Kelvin. */ 5391 temp = (259 * (r4 - r2)) / (r3 - r1); 5392 temp = (temp * 97) / 100 + 8; 5393 5394 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, 5395 IWN_KTOC(temp)); 5396 return IWN_KTOC(temp); 5397 } 5398 5399 static int 5400 iwn5000_get_temperature(struct iwn_softc *sc) 5401 { 5402 int32_t temp; 5403 5404 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5405 5406 /* 5407 * Temperature is not used by the driver for 5000 Series because 5408 * TX power calibration is handled by firmware. 5409 */ 5410 temp = le32toh(sc->rawtemp); 5411 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 5412 temp = (temp / -5) + sc->temp_off; 5413 temp = IWN_KTOC(temp); 5414 } 5415 return temp; 5416 } 5417 5418 /* 5419 * Initialize sensitivity calibration state machine. 5420 */ 5421 static int 5422 iwn_init_sensitivity(struct iwn_softc *sc) 5423 { 5424 struct iwn_ops *ops = &sc->ops; 5425 struct iwn_calib_state *calib = &sc->calib; 5426 uint32_t flags; 5427 int error; 5428 5429 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5430 5431 /* Reset calibration state machine. */ 5432 memset(calib, 0, sizeof (*calib)); 5433 calib->state = IWN_CALIB_STATE_INIT; 5434 calib->cck_state = IWN_CCK_STATE_HIFA; 5435 /* Set initial correlation values. */ 5436 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 5437 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 5438 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 5439 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 5440 calib->cck_x4 = 125; 5441 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 5442 calib->energy_cck = sc->limits->energy_cck; 5443 5444 /* Write initial sensitivity. */ 5445 if ((error = iwn_send_sensitivity(sc)) != 0) 5446 return error; 5447 5448 /* Write initial gains. */ 5449 if ((error = ops->init_gains(sc)) != 0) 5450 return error; 5451 5452 /* Request statistics at each beacon interval. */ 5453 flags = 0; 5454 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n", 5455 __func__); 5456 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 5457 } 5458 5459 /* 5460 * Collect noise and RSSI statistics for the first 20 beacons received 5461 * after association and use them to determine connected antennas and 5462 * to set differential gains. 5463 */ 5464 static void 5465 iwn_collect_noise(struct iwn_softc *sc, 5466 const struct iwn_rx_general_stats *stats) 5467 { 5468 struct iwn_ops *ops = &sc->ops; 5469 struct iwn_calib_state *calib = &sc->calib; 5470 struct ifnet *ifp = sc->sc_ifp; 5471 struct ieee80211com *ic = ifp->if_l2com; 5472 uint32_t val; 5473 int i; 5474 5475 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5476 5477 /* Accumulate RSSI and noise for all 3 antennas. */ 5478 for (i = 0; i < 3; i++) { 5479 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 5480 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 5481 } 5482 /* NB: We update differential gains only once after 20 beacons. */ 5483 if (++calib->nbeacons < 20) 5484 return; 5485 5486 /* Determine highest average RSSI. */ 5487 val = MAX(calib->rssi[0], calib->rssi[1]); 5488 val = MAX(calib->rssi[2], val); 5489 5490 /* Determine which antennas are connected. */ 5491 sc->chainmask = sc->rxchainmask; 5492 for (i = 0; i < 3; i++) 5493 if (val - calib->rssi[i] > 15 * 20) 5494 sc->chainmask &= ~(1 << i); 5495 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5496 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", 5497 __func__, sc->rxchainmask, sc->chainmask); 5498 5499 /* If none of the TX antennas are connected, keep at least one. */ 5500 if ((sc->chainmask & sc->txchainmask) == 0) 5501 sc->chainmask |= IWN_LSB(sc->txchainmask); 5502 5503 (void)ops->set_gains(sc); 5504 calib->state = IWN_CALIB_STATE_RUN; 5505 5506 #ifdef notyet 5507 /* XXX Disable RX chains with no antennas connected. */ 5508 sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 5509 if (sc->sc_is_scanning) 5510 device_printf(sc->sc_dev, 5511 "%s: is_scanning set, before RXON\n", 5512 __func__); 5513 (void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 5514 #endif 5515 5516 /* Enable power-saving mode if requested by user. */ 5517 if (ic->ic_flags & IEEE80211_F_PMGTON) 5518 (void)iwn_set_pslevel(sc, 0, 3, 1); 5519 5520 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5521 5522 } 5523 5524 static int 5525 iwn4965_init_gains(struct iwn_softc *sc) 5526 { 5527 struct iwn_phy_calib_gain cmd; 5528 5529 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5530 5531 memset(&cmd, 0, sizeof cmd); 5532 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 5533 /* Differential gains initially set to 0 for all 3 antennas. */ 5534 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5535 "%s: setting initial differential gains\n", __func__); 5536 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5537 } 5538 5539 static int 5540 iwn5000_init_gains(struct iwn_softc *sc) 5541 { 5542 struct iwn_phy_calib cmd; 5543 5544 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5545 5546 memset(&cmd, 0, sizeof cmd); 5547 cmd.code = sc->reset_noise_gain; 5548 cmd.ngroups = 1; 5549 cmd.isvalid = 1; 5550 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5551 "%s: setting initial differential gains\n", __func__); 5552 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5553 } 5554 5555 static int 5556 iwn4965_set_gains(struct iwn_softc *sc) 5557 { 5558 struct iwn_calib_state *calib = &sc->calib; 5559 struct iwn_phy_calib_gain cmd; 5560 int i, delta, noise; 5561 5562 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5563 5564 /* Get minimal noise among connected antennas. */ 5565 noise = INT_MAX; /* NB: There's at least one antenna. */ 5566 for (i = 0; i < 3; i++) 5567 if (sc->chainmask & (1 << i)) 5568 noise = MIN(calib->noise[i], noise); 5569 5570 memset(&cmd, 0, sizeof cmd); 5571 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 5572 /* Set differential gains for connected antennas. */ 5573 for (i = 0; i < 3; i++) { 5574 if (sc->chainmask & (1 << i)) { 5575 /* Compute attenuation (in unit of 1.5dB). */ 5576 delta = (noise - (int32_t)calib->noise[i]) / 30; 5577 /* NB: delta <= 0 */ 5578 /* Limit to [-4.5dB,0]. */ 5579 cmd.gain[i] = MIN(abs(delta), 3); 5580 if (delta < 0) 5581 cmd.gain[i] |= 1 << 2; /* sign bit */ 5582 } 5583 } 5584 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5585 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 5586 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); 5587 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5588 } 5589 5590 static int 5591 iwn5000_set_gains(struct iwn_softc *sc) 5592 { 5593 struct iwn_calib_state *calib = &sc->calib; 5594 struct iwn_phy_calib_gain cmd; 5595 int i, ant, div, delta; 5596 5597 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5598 5599 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 5600 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 5601 5602 memset(&cmd, 0, sizeof cmd); 5603 cmd.code = sc->noise_gain; 5604 cmd.ngroups = 1; 5605 cmd.isvalid = 1; 5606 /* Get first available RX antenna as referential. */ 5607 ant = IWN_LSB(sc->rxchainmask); 5608 /* Set differential gains for other antennas. */ 5609 for (i = ant + 1; i < 3; i++) { 5610 if (sc->chainmask & (1 << i)) { 5611 /* The delta is relative to antenna "ant". */ 5612 delta = ((int32_t)calib->noise[ant] - 5613 (int32_t)calib->noise[i]) / div; 5614 /* Limit to [-4.5dB,+4.5dB]. */ 5615 cmd.gain[i - 1] = MIN(abs(delta), 3); 5616 if (delta < 0) 5617 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 5618 } 5619 } 5620 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5621 "setting differential gains Ant B/C: %x/%x (%x)\n", 5622 cmd.gain[0], cmd.gain[1], sc->chainmask); 5623 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5624 } 5625 5626 /* 5627 * Tune RF RX sensitivity based on the number of false alarms detected 5628 * during the last beacon period. 5629 */ 5630 static void 5631 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 5632 { 5633 #define inc(val, inc, max) \ 5634 if ((val) < (max)) { \ 5635 if ((val) < (max) - (inc)) \ 5636 (val) += (inc); \ 5637 else \ 5638 (val) = (max); \ 5639 needs_update = 1; \ 5640 } 5641 #define dec(val, dec, min) \ 5642 if ((val) > (min)) { \ 5643 if ((val) > (min) + (dec)) \ 5644 (val) -= (dec); \ 5645 else \ 5646 (val) = (min); \ 5647 needs_update = 1; \ 5648 } 5649 5650 const struct iwn_sensitivity_limits *limits = sc->limits; 5651 struct iwn_calib_state *calib = &sc->calib; 5652 uint32_t val, rxena, fa; 5653 uint32_t energy[3], energy_min; 5654 uint8_t noise[3], noise_ref; 5655 int i, needs_update = 0; 5656 5657 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5658 5659 /* Check that we've been enabled long enough. */ 5660 if ((rxena = le32toh(stats->general.load)) == 0){ 5661 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__); 5662 return; 5663 } 5664 5665 /* Compute number of false alarms since last call for OFDM. */ 5666 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 5667 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 5668 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5669 5670 if (fa > 50 * rxena) { 5671 /* High false alarm count, decrease sensitivity. */ 5672 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5673 "%s: OFDM high false alarm count: %u\n", __func__, fa); 5674 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 5675 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 5676 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 5677 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 5678 5679 } else if (fa < 5 * rxena) { 5680 /* Low false alarm count, increase sensitivity. */ 5681 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5682 "%s: OFDM low false alarm count: %u\n", __func__, fa); 5683 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 5684 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 5685 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 5686 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 5687 } 5688 5689 /* Compute maximum noise among 3 receivers. */ 5690 for (i = 0; i < 3; i++) 5691 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 5692 val = MAX(noise[0], noise[1]); 5693 val = MAX(noise[2], val); 5694 /* Insert it into our samples table. */ 5695 calib->noise_samples[calib->cur_noise_sample] = val; 5696 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 5697 5698 /* Compute maximum noise among last 20 samples. */ 5699 noise_ref = calib->noise_samples[0]; 5700 for (i = 1; i < 20; i++) 5701 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 5702 5703 /* Compute maximum energy among 3 receivers. */ 5704 for (i = 0; i < 3; i++) 5705 energy[i] = le32toh(stats->general.energy[i]); 5706 val = MIN(energy[0], energy[1]); 5707 val = MIN(energy[2], val); 5708 /* Insert it into our samples table. */ 5709 calib->energy_samples[calib->cur_energy_sample] = val; 5710 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 5711 5712 /* Compute minimum energy among last 10 samples. */ 5713 energy_min = calib->energy_samples[0]; 5714 for (i = 1; i < 10; i++) 5715 energy_min = MAX(energy_min, calib->energy_samples[i]); 5716 energy_min += 6; 5717 5718 /* Compute number of false alarms since last call for CCK. */ 5719 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 5720 fa += le32toh(stats->cck.fa) - calib->fa_cck; 5721 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5722 5723 if (fa > 50 * rxena) { 5724 /* High false alarm count, decrease sensitivity. */ 5725 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5726 "%s: CCK high false alarm count: %u\n", __func__, fa); 5727 calib->cck_state = IWN_CCK_STATE_HIFA; 5728 calib->low_fa = 0; 5729 5730 if (calib->cck_x4 > 160) { 5731 calib->noise_ref = noise_ref; 5732 if (calib->energy_cck > 2) 5733 dec(calib->energy_cck, 2, energy_min); 5734 } 5735 if (calib->cck_x4 < 160) { 5736 calib->cck_x4 = 161; 5737 needs_update = 1; 5738 } else 5739 inc(calib->cck_x4, 3, limits->max_cck_x4); 5740 5741 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 5742 5743 } else if (fa < 5 * rxena) { 5744 /* Low false alarm count, increase sensitivity. */ 5745 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5746 "%s: CCK low false alarm count: %u\n", __func__, fa); 5747 calib->cck_state = IWN_CCK_STATE_LOFA; 5748 calib->low_fa++; 5749 5750 if (calib->cck_state != IWN_CCK_STATE_INIT && 5751 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 5752 calib->low_fa > 100)) { 5753 inc(calib->energy_cck, 2, limits->min_energy_cck); 5754 dec(calib->cck_x4, 3, limits->min_cck_x4); 5755 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 5756 } 5757 } else { 5758 /* Not worth to increase or decrease sensitivity. */ 5759 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5760 "%s: CCK normal false alarm count: %u\n", __func__, fa); 5761 calib->low_fa = 0; 5762 calib->noise_ref = noise_ref; 5763 5764 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 5765 /* Previous interval had many false alarms. */ 5766 dec(calib->energy_cck, 8, energy_min); 5767 } 5768 calib->cck_state = IWN_CCK_STATE_INIT; 5769 } 5770 5771 if (needs_update) 5772 (void)iwn_send_sensitivity(sc); 5773 5774 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5775 5776 #undef dec 5777 #undef inc 5778 } 5779 5780 static int 5781 iwn_send_sensitivity(struct iwn_softc *sc) 5782 { 5783 struct iwn_calib_state *calib = &sc->calib; 5784 struct iwn_enhanced_sensitivity_cmd cmd; 5785 int len; 5786 5787 memset(&cmd, 0, sizeof cmd); 5788 len = sizeof (struct iwn_sensitivity_cmd); 5789 cmd.which = IWN_SENSITIVITY_WORKTBL; 5790 /* OFDM modulation. */ 5791 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 5792 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 5793 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 5794 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 5795 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 5796 cmd.energy_ofdm_th = htole16(62); 5797 /* CCK modulation. */ 5798 cmd.corr_cck_x4 = htole16(calib->cck_x4); 5799 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 5800 cmd.energy_cck = htole16(calib->energy_cck); 5801 /* Barker modulation: use default values. */ 5802 cmd.corr_barker = htole16(190); 5803 cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc); 5804 5805 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5806 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 5807 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 5808 calib->ofdm_mrc_x4, calib->cck_x4, 5809 calib->cck_mrc_x4, calib->energy_cck); 5810 5811 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 5812 goto send; 5813 /* Enhanced sensitivity settings. */ 5814 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 5815 cmd.ofdm_det_slope_mrc = htole16(668); 5816 cmd.ofdm_det_icept_mrc = htole16(4); 5817 cmd.ofdm_det_slope = htole16(486); 5818 cmd.ofdm_det_icept = htole16(37); 5819 cmd.cck_det_slope_mrc = htole16(853); 5820 cmd.cck_det_icept_mrc = htole16(4); 5821 cmd.cck_det_slope = htole16(476); 5822 cmd.cck_det_icept = htole16(99); 5823 send: 5824 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 5825 } 5826 5827 /* 5828 * Look at the increase of PLCP errors over time; if it exceeds 5829 * a programmed threshold then trigger an RF retune. 5830 */ 5831 static void 5832 iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs) 5833 { 5834 int32_t delta_ofdm, delta_ht, delta_cck; 5835 struct iwn_calib_state *calib = &sc->calib; 5836 int delta_ticks, cur_ticks; 5837 int delta_msec; 5838 int thresh; 5839 5840 /* 5841 * Calculate the difference between the current and 5842 * previous statistics. 5843 */ 5844 delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck; 5845 delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm; 5846 delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht; 5847 5848 /* 5849 * Calculate the delta in time between successive statistics 5850 * messages. Yes, it can roll over; so we make sure that 5851 * this doesn't happen. 5852 * 5853 * XXX go figure out what to do about rollover 5854 * XXX go figure out what to do if ticks rolls over to -ve instead! 5855 * XXX go stab signed integer overflow undefined-ness in the face. 5856 */ 5857 cur_ticks = ticks; 5858 delta_ticks = cur_ticks - sc->last_calib_ticks; 5859 5860 /* 5861 * If any are negative, then the firmware likely reset; so just 5862 * bail. We'll pick this up next time. 5863 */ 5864 if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0) 5865 return; 5866 5867 /* 5868 * delta_ticks is in ticks; we need to convert it up to milliseconds 5869 * so we can do some useful math with it. 5870 */ 5871 delta_msec = ticks_to_msecs(delta_ticks); 5872 5873 /* 5874 * Calculate what our threshold is given the current delta_msec. 5875 */ 5876 thresh = sc->base_params->plcp_err_threshold * delta_msec; 5877 5878 DPRINTF(sc, IWN_DEBUG_STATE, 5879 "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n", 5880 __func__, 5881 delta_msec, 5882 delta_cck, 5883 delta_ofdm, 5884 delta_ht, 5885 (delta_msec + delta_cck + delta_ofdm + delta_ht), 5886 thresh); 5887 5888 /* 5889 * If we need a retune, then schedule a single channel scan 5890 * to a channel that isn't the currently active one! 5891 * 5892 * The math from linux iwlwifi: 5893 * 5894 * if ((delta * 100 / msecs) > threshold) 5895 */ 5896 if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) { 5897 device_printf(sc->sc_dev, 5898 "%s: PLCP error threshold raw (%d) comparison (%d) " 5899 "over limit (%d); retune!\n", 5900 __func__, 5901 (delta_cck + delta_ofdm + delta_ht), 5902 (delta_cck + delta_ofdm + delta_ht) * 100, 5903 thresh); 5904 } 5905 } 5906 5907 /* 5908 * Set STA mode power saving level (between 0 and 5). 5909 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 5910 */ 5911 static int 5912 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 5913 { 5914 struct iwn_pmgt_cmd cmd; 5915 const struct iwn_pmgt *pmgt; 5916 uint32_t max, skip_dtim; 5917 uint32_t reg; 5918 int i; 5919 5920 DPRINTF(sc, IWN_DEBUG_PWRSAVE, 5921 "%s: dtim=%d, level=%d, async=%d\n", 5922 __func__, 5923 dtim, 5924 level, 5925 async); 5926 5927 /* Select which PS parameters to use. */ 5928 if (dtim <= 2) 5929 pmgt = &iwn_pmgt[0][level]; 5930 else if (dtim <= 10) 5931 pmgt = &iwn_pmgt[1][level]; 5932 else 5933 pmgt = &iwn_pmgt[2][level]; 5934 5935 memset(&cmd, 0, sizeof cmd); 5936 if (level != 0) /* not CAM */ 5937 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 5938 if (level == 5) 5939 cmd.flags |= htole16(IWN_PS_FAST_PD); 5940 /* Retrieve PCIe Active State Power Management (ASPM). */ 5941 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5942 if (!(reg & 0x1)) /* L0s Entry disabled. */ 5943 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 5944 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 5945 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 5946 5947 if (dtim == 0) { 5948 dtim = 1; 5949 skip_dtim = 0; 5950 } else 5951 skip_dtim = pmgt->skip_dtim; 5952 if (skip_dtim != 0) { 5953 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 5954 max = pmgt->intval[4]; 5955 if (max == (uint32_t)-1) 5956 max = dtim * (skip_dtim + 1); 5957 else if (max > dtim) 5958 max = (max / dtim) * dtim; 5959 } else 5960 max = dtim; 5961 for (i = 0; i < 5; i++) 5962 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 5963 5964 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 5965 level); 5966 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 5967 } 5968 5969 static int 5970 iwn_send_btcoex(struct iwn_softc *sc) 5971 { 5972 struct iwn_bluetooth cmd; 5973 5974 memset(&cmd, 0, sizeof cmd); 5975 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 5976 cmd.lead_time = IWN_BT_LEAD_TIME_DEF; 5977 cmd.max_kill = IWN_BT_MAX_KILL_DEF; 5978 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 5979 __func__); 5980 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 5981 } 5982 5983 static int 5984 iwn_send_advanced_btcoex(struct iwn_softc *sc) 5985 { 5986 static const uint32_t btcoex_3wire[12] = { 5987 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 5988 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 5989 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, 5990 }; 5991 struct iwn6000_btcoex_config btconfig; 5992 struct iwn2000_btcoex_config btconfig2k; 5993 struct iwn_btcoex_priotable btprio; 5994 struct iwn_btcoex_prot btprot; 5995 int error, i; 5996 uint8_t flags; 5997 5998 memset(&btconfig, 0, sizeof btconfig); 5999 memset(&btconfig2k, 0, sizeof btconfig2k); 6000 6001 flags = IWN_BT_FLAG_COEX6000_MODE_3W << 6002 IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2 6003 6004 if (sc->base_params->bt_sco_disable) 6005 flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE; 6006 else 6007 flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE; 6008 6009 flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION; 6010 6011 /* Default flags result is 145 as old value */ 6012 6013 /* 6014 * Flags value has to be review. Values must change if we 6015 * which to disable it 6016 */ 6017 if (sc->base_params->bt_session_2) { 6018 btconfig2k.flags = flags; 6019 btconfig2k.max_kill = 5; 6020 btconfig2k.bt3_t7_timer = 1; 6021 btconfig2k.kill_ack = htole32(0xffff0000); 6022 btconfig2k.kill_cts = htole32(0xffff0000); 6023 btconfig2k.sample_time = 2; 6024 btconfig2k.bt3_t2_timer = 0xc; 6025 6026 for (i = 0; i < 12; i++) 6027 btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]); 6028 btconfig2k.valid = htole16(0xff); 6029 btconfig2k.prio_boost = htole32(0xf0); 6030 DPRINTF(sc, IWN_DEBUG_RESET, 6031 "%s: configuring advanced bluetooth coexistence" 6032 " session 2, flags : 0x%x\n", 6033 __func__, 6034 flags); 6035 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k, 6036 sizeof(btconfig2k), 1); 6037 } else { 6038 btconfig.flags = flags; 6039 btconfig.max_kill = 5; 6040 btconfig.bt3_t7_timer = 1; 6041 btconfig.kill_ack = htole32(0xffff0000); 6042 btconfig.kill_cts = htole32(0xffff0000); 6043 btconfig.sample_time = 2; 6044 btconfig.bt3_t2_timer = 0xc; 6045 6046 for (i = 0; i < 12; i++) 6047 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 6048 btconfig.valid = htole16(0xff); 6049 btconfig.prio_boost = 0xf0; 6050 DPRINTF(sc, IWN_DEBUG_RESET, 6051 "%s: configuring advanced bluetooth coexistence," 6052 " flags : 0x%x\n", 6053 __func__, 6054 flags); 6055 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 6056 sizeof(btconfig), 1); 6057 } 6058 6059 if (error != 0) 6060 return error; 6061 6062 memset(&btprio, 0, sizeof btprio); 6063 btprio.calib_init1 = 0x6; 6064 btprio.calib_init2 = 0x7; 6065 btprio.calib_periodic_low1 = 0x2; 6066 btprio.calib_periodic_low2 = 0x3; 6067 btprio.calib_periodic_high1 = 0x4; 6068 btprio.calib_periodic_high2 = 0x5; 6069 btprio.dtim = 0x6; 6070 btprio.scan52 = 0x8; 6071 btprio.scan24 = 0xa; 6072 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 6073 1); 6074 if (error != 0) 6075 return error; 6076 6077 /* Force BT state machine change. */ 6078 memset(&btprot, 0, sizeof btprot); 6079 btprot.open = 1; 6080 btprot.type = 1; 6081 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6082 if (error != 0) 6083 return error; 6084 btprot.open = 0; 6085 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6086 } 6087 6088 static int 6089 iwn5000_runtime_calib(struct iwn_softc *sc) 6090 { 6091 struct iwn5000_calib_config cmd; 6092 6093 memset(&cmd, 0, sizeof cmd); 6094 cmd.ucode.once.enable = 0xffffffff; 6095 cmd.ucode.once.start = IWN5000_CALIB_DC; 6096 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 6097 "%s: configuring runtime calibration\n", __func__); 6098 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 6099 } 6100 6101 static int 6102 iwn_config(struct iwn_softc *sc) 6103 { 6104 struct iwn_ops *ops = &sc->ops; 6105 struct ifnet *ifp = sc->sc_ifp; 6106 struct ieee80211com *ic = ifp->if_l2com; 6107 uint32_t txmask; 6108 uint16_t rxchain; 6109 int error; 6110 6111 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6112 6113 if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) 6114 && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) { 6115 device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are" 6116 " exclusive each together. Review NIC config file. Conf" 6117 " : 0x%08x Flags : 0x%08x \n", __func__, 6118 sc->base_params->calib_need, 6119 (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET | 6120 IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)); 6121 return (EINVAL); 6122 } 6123 6124 /* Compute temperature calib if needed. Will be send by send calib */ 6125 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) { 6126 error = iwn5000_temp_offset_calib(sc); 6127 if (error != 0) { 6128 device_printf(sc->sc_dev, 6129 "%s: could not set temperature offset\n", __func__); 6130 return (error); 6131 } 6132 } else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { 6133 error = iwn5000_temp_offset_calibv2(sc); 6134 if (error != 0) { 6135 device_printf(sc->sc_dev, 6136 "%s: could not compute temperature offset v2\n", 6137 __func__); 6138 return (error); 6139 } 6140 } 6141 6142 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 6143 /* Configure runtime DC calibration. */ 6144 error = iwn5000_runtime_calib(sc); 6145 if (error != 0) { 6146 device_printf(sc->sc_dev, 6147 "%s: could not configure runtime calibration\n", 6148 __func__); 6149 return error; 6150 } 6151 } 6152 6153 /* Configure valid TX chains for >=5000 Series. */ 6154 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 6155 txmask = htole32(sc->txchainmask); 6156 DPRINTF(sc, IWN_DEBUG_RESET, 6157 "%s: configuring valid TX chains 0x%x\n", __func__, txmask); 6158 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 6159 sizeof txmask, 0); 6160 if (error != 0) { 6161 device_printf(sc->sc_dev, 6162 "%s: could not configure valid TX chains, " 6163 "error %d\n", __func__, error); 6164 return error; 6165 } 6166 } 6167 6168 /* Configure bluetooth coexistence. */ 6169 error = 0; 6170 6171 /* Configure bluetooth coexistence if needed. */ 6172 if (sc->base_params->bt_mode == IWN_BT_ADVANCED) 6173 error = iwn_send_advanced_btcoex(sc); 6174 if (sc->base_params->bt_mode == IWN_BT_SIMPLE) 6175 error = iwn_send_btcoex(sc); 6176 6177 if (error != 0) { 6178 device_printf(sc->sc_dev, 6179 "%s: could not configure bluetooth coexistence, error %d\n", 6180 __func__, error); 6181 return error; 6182 } 6183 6184 /* Set mode, channel, RX filter and enable RX. */ 6185 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6186 memset(sc->rxon, 0, sizeof (struct iwn_rxon)); 6187 IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp)); 6188 IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp)); 6189 sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 6190 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6191 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 6192 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6193 switch (ic->ic_opmode) { 6194 case IEEE80211_M_STA: 6195 sc->rxon->mode = IWN_MODE_STA; 6196 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST); 6197 break; 6198 case IEEE80211_M_MONITOR: 6199 sc->rxon->mode = IWN_MODE_MONITOR; 6200 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST | 6201 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 6202 break; 6203 default: 6204 /* Should not get there. */ 6205 break; 6206 } 6207 sc->rxon->cck_mask = 0x0f; /* not yet negotiated */ 6208 sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */ 6209 sc->rxon->ht_single_mask = 0xff; 6210 sc->rxon->ht_dual_mask = 0xff; 6211 sc->rxon->ht_triple_mask = 0xff; 6212 rxchain = 6213 IWN_RXCHAIN_VALID(sc->rxchainmask) | 6214 IWN_RXCHAIN_MIMO_COUNT(2) | 6215 IWN_RXCHAIN_IDLE_COUNT(2); 6216 sc->rxon->rxchain = htole16(rxchain); 6217 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); 6218 if (sc->sc_is_scanning) 6219 device_printf(sc->sc_dev, 6220 "%s: is_scanning set, before RXON\n", 6221 __func__); 6222 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0); 6223 if (error != 0) { 6224 device_printf(sc->sc_dev, "%s: RXON command failed\n", 6225 __func__); 6226 return error; 6227 } 6228 6229 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 6230 device_printf(sc->sc_dev, "%s: could not add broadcast node\n", 6231 __func__); 6232 return error; 6233 } 6234 6235 /* Configuration has changed, set TX power accordingly. */ 6236 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) { 6237 device_printf(sc->sc_dev, "%s: could not set TX power\n", 6238 __func__); 6239 return error; 6240 } 6241 6242 if ((error = iwn_set_critical_temp(sc)) != 0) { 6243 device_printf(sc->sc_dev, 6244 "%s: could not set critical temperature\n", __func__); 6245 return error; 6246 } 6247 6248 /* Set power saving level to CAM during initialization. */ 6249 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 6250 device_printf(sc->sc_dev, 6251 "%s: could not set power saving level\n", __func__); 6252 return error; 6253 } 6254 6255 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6256 6257 return 0; 6258 } 6259 6260 /* 6261 * Add an ssid element to a frame. 6262 */ 6263 static uint8_t * 6264 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) 6265 { 6266 *frm++ = IEEE80211_ELEMID_SSID; 6267 *frm++ = len; 6268 memcpy(frm, ssid, len); 6269 return frm + len; 6270 } 6271 6272 static uint16_t 6273 iwn_get_active_dwell_time(struct iwn_softc *sc, 6274 struct ieee80211_channel *c, uint8_t n_probes) 6275 { 6276 /* No channel? Default to 2GHz settings */ 6277 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 6278 return (IWN_ACTIVE_DWELL_TIME_2GHZ + 6279 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 6280 } 6281 6282 /* 5GHz dwell time */ 6283 return (IWN_ACTIVE_DWELL_TIME_5GHZ + 6284 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 6285 } 6286 6287 /* 6288 * Limit the total dwell time to 85% of the beacon interval. 6289 * 6290 * Returns the dwell time in milliseconds. 6291 */ 6292 static uint16_t 6293 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 6294 { 6295 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 6296 struct ieee80211vap *vap = NULL; 6297 int bintval = 0; 6298 6299 /* bintval is in TU (1.024mS) */ 6300 if (! TAILQ_EMPTY(&ic->ic_vaps)) { 6301 vap = TAILQ_FIRST(&ic->ic_vaps); 6302 bintval = vap->iv_bss->ni_intval; 6303 } 6304 6305 /* 6306 * If it's non-zero, we should calculate the minimum of 6307 * it and the DWELL_BASE. 6308 * 6309 * XXX Yes, the math should take into account that bintval 6310 * is 1.024mS, not 1mS.. 6311 */ 6312 if (bintval > 0) { 6313 DPRINTF(sc, IWN_DEBUG_SCAN, 6314 "%s: bintval=%d\n", 6315 __func__, 6316 bintval); 6317 return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); 6318 } 6319 6320 /* No association context? Default */ 6321 return (IWN_PASSIVE_DWELL_BASE); 6322 } 6323 6324 static uint16_t 6325 iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c) 6326 { 6327 uint16_t passive; 6328 6329 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 6330 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 6331 } else { 6332 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 6333 } 6334 6335 /* Clamp to the beacon interval if we're associated */ 6336 return (iwn_limit_dwell(sc, passive)); 6337 } 6338 6339 static int 6340 iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap, 6341 struct ieee80211_scan_state *ss, struct ieee80211_channel *c) 6342 { 6343 struct ifnet *ifp = sc->sc_ifp; 6344 struct ieee80211com *ic = ifp->if_l2com; 6345 struct ieee80211_node *ni = vap->iv_bss; 6346 struct iwn_scan_hdr *hdr; 6347 struct iwn_cmd_data *tx; 6348 struct iwn_scan_essid *essid; 6349 struct iwn_scan_chan *chan; 6350 struct ieee80211_frame *wh; 6351 struct ieee80211_rateset *rs; 6352 uint8_t *buf, *frm; 6353 uint16_t rxchain; 6354 uint8_t txant; 6355 int buflen, error; 6356 int is_active; 6357 uint16_t dwell_active, dwell_passive; 6358 uint32_t extra, scan_service_time; 6359 6360 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6361 6362 /* 6363 * We are absolutely not allowed to send a scan command when another 6364 * scan command is pending. 6365 */ 6366 if (sc->sc_is_scanning) { 6367 device_printf(sc->sc_dev, "%s: called whilst scanning!\n", 6368 __func__); 6369 return (EAGAIN); 6370 } 6371 6372 /* Assign the scan channel */ 6373 c = ic->ic_curchan; 6374 6375 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6376 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 6377 if (buf == NULL) { 6378 device_printf(sc->sc_dev, 6379 "%s: could not allocate buffer for scan command\n", 6380 __func__); 6381 return ENOMEM; 6382 } 6383 hdr = (struct iwn_scan_hdr *)buf; 6384 /* 6385 * Move to the next channel if no frames are received within 10ms 6386 * after sending the probe request. 6387 */ 6388 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 6389 hdr->quiet_threshold = htole16(1); /* min # of packets */ 6390 /* 6391 * Max needs to be greater than active and passive and quiet! 6392 * It's also in microseconds! 6393 */ 6394 hdr->max_svc = htole32(250 * 1024); 6395 6396 /* 6397 * Reset scan: interval=100 6398 * Normal scan: interval=becaon interval 6399 * suspend_time: 100 (TU) 6400 * 6401 */ 6402 extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22; 6403 //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024); 6404 scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */ 6405 hdr->pause_svc = htole32(scan_service_time); 6406 6407 /* Select antennas for scanning. */ 6408 rxchain = 6409 IWN_RXCHAIN_VALID(sc->rxchainmask) | 6410 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 6411 IWN_RXCHAIN_DRIVER_FORCE; 6412 if (IEEE80211_IS_CHAN_A(c) && 6413 sc->hw_type == IWN_HW_REV_TYPE_4965) { 6414 /* Ant A must be avoided in 5GHz because of an HW bug. */ 6415 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); 6416 } else /* Use all available RX antennas. */ 6417 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 6418 hdr->rxchain = htole16(rxchain); 6419 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 6420 6421 tx = (struct iwn_cmd_data *)(hdr + 1); 6422 tx->flags = htole32(IWN_TX_AUTO_SEQ); 6423 tx->id = sc->broadcast_id; 6424 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 6425 6426 if (IEEE80211_IS_CHAN_5GHZ(c)) { 6427 /* Send probe requests at 6Mbps. */ 6428 tx->rate = htole32(0xd); 6429 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 6430 } else { 6431 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 6432 if (sc->hw_type == IWN_HW_REV_TYPE_4965 && 6433 sc->rxon->associd && sc->rxon->chan > 14) 6434 tx->rate = htole32(0xd); 6435 else { 6436 /* Send probe requests at 1Mbps. */ 6437 tx->rate = htole32(10 | IWN_RFLAG_CCK); 6438 } 6439 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 6440 } 6441 /* Use the first valid TX antenna. */ 6442 txant = IWN_LSB(sc->txchainmask); 6443 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 6444 6445 /* 6446 * Only do active scanning if we're announcing a probe request 6447 * for a given SSID (or more, if we ever add it to the driver.) 6448 */ 6449 is_active = 0; 6450 6451 /* 6452 * If we're scanning for a specific SSID, add it to the command. 6453 * 6454 * XXX maybe look at adding support for scanning multiple SSIDs? 6455 */ 6456 essid = (struct iwn_scan_essid *)(tx + 1); 6457 if (ss != NULL) { 6458 if (ss->ss_ssid[0].len != 0) { 6459 essid[0].id = IEEE80211_ELEMID_SSID; 6460 essid[0].len = ss->ss_ssid[0].len; 6461 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 6462 } 6463 6464 DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n", 6465 __func__, 6466 ss->ss_ssid[0].len, 6467 ss->ss_ssid[0].len, 6468 ss->ss_ssid[0].ssid); 6469 6470 if (ss->ss_nssid > 0) 6471 is_active = 1; 6472 } 6473 6474 /* 6475 * Build a probe request frame. Most of the following code is a 6476 * copy & paste of what is done in net80211. 6477 */ 6478 wh = (struct ieee80211_frame *)(essid + 20); 6479 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 6480 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 6481 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 6482 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 6483 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 6484 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 6485 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 6486 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 6487 6488 frm = (uint8_t *)(wh + 1); 6489 frm = ieee80211_add_ssid(frm, NULL, 0); 6490 frm = ieee80211_add_rates(frm, rs); 6491 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6492 frm = ieee80211_add_xrates(frm, rs); 6493 if (ic->ic_htcaps & IEEE80211_HTC_HT) 6494 frm = ieee80211_add_htcap(frm, ni); 6495 6496 /* Set length of probe request. */ 6497 tx->len = htole16(frm - (uint8_t *)wh); 6498 6499 /* 6500 * If active scanning is requested but a certain channel is 6501 * marked passive, we can do active scanning if we detect 6502 * transmissions. 6503 * 6504 * There is an issue with some firmware versions that triggers 6505 * a sysassert on a "good CRC threshold" of zero (== disabled), 6506 * on a radar channel even though this means that we should NOT 6507 * send probes. 6508 * 6509 * The "good CRC threshold" is the number of frames that we 6510 * need to receive during our dwell time on a channel before 6511 * sending out probes -- setting this to a huge value will 6512 * mean we never reach it, but at the same time work around 6513 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER 6514 * here instead of IWL_GOOD_CRC_TH_DISABLED. 6515 * 6516 * This was fixed in later versions along with some other 6517 * scan changes, and the threshold behaves as a flag in those 6518 * versions. 6519 */ 6520 6521 /* 6522 * If we're doing active scanning, set the crc_threshold 6523 * to a suitable value. This is different to active veruss 6524 * passive scanning depending upon the channel flags; the 6525 * firmware will obey that particular check for us. 6526 */ 6527 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 6528 hdr->crc_threshold = is_active ? 6529 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 6530 else 6531 hdr->crc_threshold = is_active ? 6532 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 6533 6534 chan = (struct iwn_scan_chan *)frm; 6535 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 6536 chan->flags = 0; 6537 if (ss->ss_nssid > 0) 6538 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 6539 chan->dsp_gain = 0x6e; 6540 6541 /* 6542 * Set the passive/active flag depending upon the channel mode. 6543 * XXX TODO: take the is_active flag into account as well? 6544 */ 6545 if (c->ic_flags & IEEE80211_CHAN_PASSIVE) 6546 chan->flags |= htole32(IWN_CHAN_PASSIVE); 6547 else 6548 chan->flags |= htole32(IWN_CHAN_ACTIVE); 6549 6550 /* 6551 * Calculate the active/passive dwell times. 6552 */ 6553 6554 dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid); 6555 dwell_passive = iwn_get_passive_dwell_time(sc, c); 6556 6557 /* Make sure they're valid */ 6558 if (dwell_passive <= dwell_active) 6559 dwell_passive = dwell_active + 1; 6560 6561 chan->active = htole16(dwell_active); 6562 chan->passive = htole16(dwell_passive); 6563 6564 if (IEEE80211_IS_CHAN_5GHZ(c) && 6565 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6566 chan->rf_gain = 0x3b; 6567 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 6568 chan->rf_gain = 0x3b; 6569 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6570 chan->rf_gain = 0x28; 6571 } else { 6572 chan->rf_gain = 0x28; 6573 } 6574 6575 DPRINTF(sc, IWN_DEBUG_STATE, 6576 "%s: chan %u flags 0x%x rf_gain 0x%x " 6577 "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x " 6578 "isactive=%d numssid=%d\n", __func__, 6579 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 6580 dwell_active, dwell_passive, scan_service_time, 6581 hdr->crc_threshold, is_active, ss->ss_nssid); 6582 6583 hdr->nchan++; 6584 chan++; 6585 buflen = (uint8_t *)chan - buf; 6586 hdr->len = htole16(buflen); 6587 6588 if (sc->sc_is_scanning) { 6589 device_printf(sc->sc_dev, 6590 "%s: called with is_scanning set!\n", 6591 __func__); 6592 } 6593 sc->sc_is_scanning = 1; 6594 6595 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 6596 hdr->nchan); 6597 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 6598 free(buf, M_DEVBUF); 6599 6600 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6601 6602 return error; 6603 } 6604 6605 static int 6606 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 6607 { 6608 struct iwn_ops *ops = &sc->ops; 6609 struct ifnet *ifp = sc->sc_ifp; 6610 struct ieee80211com *ic = ifp->if_l2com; 6611 struct ieee80211_node *ni = vap->iv_bss; 6612 int error; 6613 6614 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6615 6616 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6617 /* Update adapter configuration. */ 6618 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); 6619 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); 6620 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6621 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 6622 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6623 if (ic->ic_flags & IEEE80211_F_SHSLOT) 6624 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); 6625 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6626 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); 6627 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 6628 sc->rxon->cck_mask = 0; 6629 sc->rxon->ofdm_mask = 0x15; 6630 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 6631 sc->rxon->cck_mask = 0x03; 6632 sc->rxon->ofdm_mask = 0; 6633 } else { 6634 /* Assume 802.11b/g. */ 6635 sc->rxon->cck_mask = 0x03; 6636 sc->rxon->ofdm_mask = 0x15; 6637 } 6638 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 6639 sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask, 6640 sc->rxon->ofdm_mask); 6641 if (sc->sc_is_scanning) 6642 device_printf(sc->sc_dev, 6643 "%s: is_scanning set, before RXON\n", 6644 __func__); 6645 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 6646 if (error != 0) { 6647 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n", 6648 __func__, error); 6649 return error; 6650 } 6651 6652 /* Configuration has changed, set TX power accordingly. */ 6653 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 6654 device_printf(sc->sc_dev, 6655 "%s: could not set TX power, error %d\n", __func__, error); 6656 return error; 6657 } 6658 /* 6659 * Reconfiguring RXON clears the firmware nodes table so we must 6660 * add the broadcast node again. 6661 */ 6662 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 6663 device_printf(sc->sc_dev, 6664 "%s: could not add broadcast node, error %d\n", __func__, 6665 error); 6666 return error; 6667 } 6668 6669 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6670 6671 return 0; 6672 } 6673 6674 static int 6675 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 6676 { 6677 struct iwn_ops *ops = &sc->ops; 6678 struct ifnet *ifp = sc->sc_ifp; 6679 struct ieee80211com *ic = ifp->if_l2com; 6680 struct ieee80211_node *ni = vap->iv_bss; 6681 struct iwn_node_info node; 6682 uint32_t htflags = 0; 6683 int error; 6684 6685 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6686 6687 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6688 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 6689 /* Link LED blinks while monitoring. */ 6690 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 6691 return 0; 6692 } 6693 if ((error = iwn_set_timing(sc, ni)) != 0) { 6694 device_printf(sc->sc_dev, 6695 "%s: could not set timing, error %d\n", __func__, error); 6696 return error; 6697 } 6698 6699 /* Update adapter configuration. */ 6700 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); 6701 sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd)); 6702 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); 6703 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6704 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 6705 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6706 if (ic->ic_flags & IEEE80211_F_SHSLOT) 6707 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); 6708 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 6709 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); 6710 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 6711 sc->rxon->cck_mask = 0; 6712 sc->rxon->ofdm_mask = 0x15; 6713 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 6714 sc->rxon->cck_mask = 0x03; 6715 sc->rxon->ofdm_mask = 0; 6716 } else { 6717 /* Assume 802.11b/g. */ 6718 sc->rxon->cck_mask = 0x0f; 6719 sc->rxon->ofdm_mask = 0x15; 6720 } 6721 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 6722 htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode); 6723 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 6724 switch (ic->ic_curhtprotmode) { 6725 case IEEE80211_HTINFO_OPMODE_HT20PR: 6726 htflags |= IWN_RXON_HT_MODEPURE40; 6727 break; 6728 default: 6729 htflags |= IWN_RXON_HT_MODEMIXED; 6730 break; 6731 } 6732 } 6733 if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 6734 htflags |= IWN_RXON_HT_HT40MINUS; 6735 } 6736 sc->rxon->flags |= htole32(htflags); 6737 sc->rxon->filter |= htole32(IWN_FILTER_BSS); 6738 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n", 6739 sc->rxon->chan, sc->rxon->flags); 6740 if (sc->sc_is_scanning) 6741 device_printf(sc->sc_dev, 6742 "%s: is_scanning set, before RXON\n", 6743 __func__); 6744 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 6745 if (error != 0) { 6746 device_printf(sc->sc_dev, 6747 "%s: could not update configuration, error %d\n", __func__, 6748 error); 6749 return error; 6750 } 6751 6752 /* Configuration has changed, set TX power accordingly. */ 6753 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 6754 device_printf(sc->sc_dev, 6755 "%s: could not set TX power, error %d\n", __func__, error); 6756 return error; 6757 } 6758 6759 /* Fake a join to initialize the TX rate. */ 6760 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 6761 iwn_newassoc(ni, 1); 6762 6763 /* Add BSS node. */ 6764 memset(&node, 0, sizeof node); 6765 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 6766 node.id = IWN_ID_BSS; 6767 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 6768 switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) { 6769 case IEEE80211_HTCAP_SMPS_ENA: 6770 node.htflags |= htole32(IWN_SMPS_MIMO_DIS); 6771 break; 6772 case IEEE80211_HTCAP_SMPS_DYNAMIC: 6773 node.htflags |= htole32(IWN_SMPS_MIMO_PROT); 6774 break; 6775 } 6776 node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) | 6777 IWN_AMDPU_DENSITY(5)); /* 4us */ 6778 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) 6779 node.htflags |= htole32(IWN_NODE_HT40); 6780 } 6781 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__); 6782 error = ops->add_node(sc, &node, 1); 6783 if (error != 0) { 6784 device_printf(sc->sc_dev, 6785 "%s: could not add BSS node, error %d\n", __func__, error); 6786 return error; 6787 } 6788 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n", 6789 __func__, node.id); 6790 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 6791 device_printf(sc->sc_dev, 6792 "%s: could not setup link quality for node %d, error %d\n", 6793 __func__, node.id, error); 6794 return error; 6795 } 6796 6797 if ((error = iwn_init_sensitivity(sc)) != 0) { 6798 device_printf(sc->sc_dev, 6799 "%s: could not set sensitivity, error %d\n", __func__, 6800 error); 6801 return error; 6802 } 6803 /* Start periodic calibration timer. */ 6804 sc->calib.state = IWN_CALIB_STATE_ASSOC; 6805 sc->calib_cnt = 0; 6806 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 6807 sc); 6808 6809 /* Link LED always on while associated. */ 6810 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 6811 6812 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6813 6814 return 0; 6815 } 6816 6817 /* 6818 * This function is called by upper layer when an ADDBA request is received 6819 * from another STA and before the ADDBA response is sent. 6820 */ 6821 static int 6822 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 6823 int baparamset, int batimeout, int baseqctl) 6824 { 6825 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 6826 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6827 struct iwn_ops *ops = &sc->ops; 6828 struct iwn_node *wn = (void *)ni; 6829 struct iwn_node_info node; 6830 uint16_t ssn; 6831 uint8_t tid; 6832 int error; 6833 6834 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6835 6836 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 6837 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 6838 6839 memset(&node, 0, sizeof node); 6840 node.id = wn->id; 6841 node.control = IWN_NODE_UPDATE; 6842 node.flags = IWN_FLAG_SET_ADDBA; 6843 node.addba_tid = tid; 6844 node.addba_ssn = htole16(ssn); 6845 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 6846 wn->id, tid, ssn); 6847 error = ops->add_node(sc, &node, 1); 6848 if (error != 0) 6849 return error; 6850 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 6851 #undef MS 6852 } 6853 6854 /* 6855 * This function is called by upper layer on teardown of an HT-immediate 6856 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 6857 */ 6858 static void 6859 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 6860 { 6861 struct ieee80211com *ic = ni->ni_ic; 6862 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6863 struct iwn_ops *ops = &sc->ops; 6864 struct iwn_node *wn = (void *)ni; 6865 struct iwn_node_info node; 6866 uint8_t tid; 6867 6868 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6869 6870 /* XXX: tid as an argument */ 6871 for (tid = 0; tid < WME_NUM_TID; tid++) { 6872 if (&ni->ni_rx_ampdu[tid] == rap) 6873 break; 6874 } 6875 6876 memset(&node, 0, sizeof node); 6877 node.id = wn->id; 6878 node.control = IWN_NODE_UPDATE; 6879 node.flags = IWN_FLAG_SET_DELBA; 6880 node.delba_tid = tid; 6881 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 6882 (void)ops->add_node(sc, &node, 1); 6883 sc->sc_ampdu_rx_stop(ni, rap); 6884 } 6885 6886 static int 6887 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6888 int dialogtoken, int baparamset, int batimeout) 6889 { 6890 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6891 int qid; 6892 6893 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6894 6895 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 6896 if (sc->qid2tap[qid] == NULL) 6897 break; 6898 } 6899 if (qid == sc->ntxqs) { 6900 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 6901 __func__); 6902 return 0; 6903 } 6904 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 6905 if (tap->txa_private == NULL) { 6906 device_printf(sc->sc_dev, 6907 "%s: failed to alloc TX aggregation structure\n", __func__); 6908 return 0; 6909 } 6910 sc->qid2tap[qid] = tap; 6911 *(int *)tap->txa_private = qid; 6912 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 6913 batimeout); 6914 } 6915 6916 static int 6917 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6918 int code, int baparamset, int batimeout) 6919 { 6920 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6921 int qid = *(int *)tap->txa_private; 6922 uint8_t tid = tap->txa_tid; 6923 int ret; 6924 6925 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6926 6927 if (code == IEEE80211_STATUS_SUCCESS) { 6928 ni->ni_txseqs[tid] = tap->txa_start & 0xfff; 6929 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); 6930 if (ret != 1) 6931 return ret; 6932 } else { 6933 sc->qid2tap[qid] = NULL; 6934 free(tap->txa_private, M_DEVBUF); 6935 tap->txa_private = NULL; 6936 } 6937 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); 6938 } 6939 6940 /* 6941 * This function is called by upper layer when an ADDBA response is received 6942 * from another STA. 6943 */ 6944 static int 6945 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 6946 uint8_t tid) 6947 { 6948 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; 6949 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6950 struct iwn_ops *ops = &sc->ops; 6951 struct iwn_node *wn = (void *)ni; 6952 struct iwn_node_info node; 6953 int error, qid; 6954 6955 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6956 6957 /* Enable TX for the specified RA/TID. */ 6958 wn->disable_tid &= ~(1 << tid); 6959 memset(&node, 0, sizeof node); 6960 node.id = wn->id; 6961 node.control = IWN_NODE_UPDATE; 6962 node.flags = IWN_FLAG_SET_DISABLE_TID; 6963 node.disable_tid = htole16(wn->disable_tid); 6964 error = ops->add_node(sc, &node, 1); 6965 if (error != 0) 6966 return 0; 6967 6968 if ((error = iwn_nic_lock(sc)) != 0) 6969 return 0; 6970 qid = *(int *)tap->txa_private; 6971 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n", 6972 __func__, wn->id, tid, tap->txa_start, qid); 6973 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff); 6974 iwn_nic_unlock(sc); 6975 6976 iwn_set_link_quality(sc, ni); 6977 return 1; 6978 } 6979 6980 static void 6981 iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 6982 { 6983 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6984 struct iwn_ops *ops = &sc->ops; 6985 uint8_t tid = tap->txa_tid; 6986 int qid; 6987 6988 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6989 6990 sc->sc_addba_stop(ni, tap); 6991 6992 if (tap->txa_private == NULL) 6993 return; 6994 6995 qid = *(int *)tap->txa_private; 6996 if (sc->txq[qid].queued != 0) 6997 return; 6998 if (iwn_nic_lock(sc) != 0) 6999 return; 7000 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff); 7001 iwn_nic_unlock(sc); 7002 sc->qid2tap[qid] = NULL; 7003 free(tap->txa_private, M_DEVBUF); 7004 tap->txa_private = NULL; 7005 } 7006 7007 static void 7008 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 7009 int qid, uint8_t tid, uint16_t ssn) 7010 { 7011 struct iwn_node *wn = (void *)ni; 7012 7013 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7014 7015 /* Stop TX scheduler while we're changing its configuration. */ 7016 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7017 IWN4965_TXQ_STATUS_CHGACT); 7018 7019 /* Assign RA/TID translation to the queue. */ 7020 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 7021 wn->id << 4 | tid); 7022 7023 /* Enable chain-building mode for the queue. */ 7024 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 7025 7026 /* Set starting sequence number from the ADDBA request. */ 7027 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 7028 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7029 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 7030 7031 /* Set scheduler window size. */ 7032 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 7033 IWN_SCHED_WINSZ); 7034 /* Set scheduler frame limit. */ 7035 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 7036 IWN_SCHED_LIMIT << 16); 7037 7038 /* Enable interrupts for the queue. */ 7039 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 7040 7041 /* Mark the queue as active. */ 7042 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7043 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 7044 iwn_tid2fifo[tid] << 1); 7045 } 7046 7047 static void 7048 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 7049 { 7050 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7051 7052 /* Stop TX scheduler while we're changing its configuration. */ 7053 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7054 IWN4965_TXQ_STATUS_CHGACT); 7055 7056 /* Set starting sequence number from the ADDBA request. */ 7057 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7058 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 7059 7060 /* Disable interrupts for the queue. */ 7061 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 7062 7063 /* Mark the queue as inactive. */ 7064 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7065 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 7066 } 7067 7068 static void 7069 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 7070 int qid, uint8_t tid, uint16_t ssn) 7071 { 7072 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7073 7074 struct iwn_node *wn = (void *)ni; 7075 7076 /* Stop TX scheduler while we're changing its configuration. */ 7077 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7078 IWN5000_TXQ_STATUS_CHGACT); 7079 7080 /* Assign RA/TID translation to the queue. */ 7081 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 7082 wn->id << 4 | tid); 7083 7084 /* Enable chain-building mode for the queue. */ 7085 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 7086 7087 /* Enable aggregation for the queue. */ 7088 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 7089 7090 /* Set starting sequence number from the ADDBA request. */ 7091 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 7092 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7093 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 7094 7095 /* Set scheduler window size and frame limit. */ 7096 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7097 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7098 7099 /* Enable interrupts for the queue. */ 7100 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 7101 7102 /* Mark the queue as active. */ 7103 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7104 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 7105 } 7106 7107 static void 7108 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 7109 { 7110 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7111 7112 /* Stop TX scheduler while we're changing its configuration. */ 7113 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7114 IWN5000_TXQ_STATUS_CHGACT); 7115 7116 /* Disable aggregation for the queue. */ 7117 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 7118 7119 /* Set starting sequence number from the ADDBA request. */ 7120 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7121 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 7122 7123 /* Disable interrupts for the queue. */ 7124 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 7125 7126 /* Mark the queue as inactive. */ 7127 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7128 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 7129 } 7130 7131 /* 7132 * Query calibration tables from the initialization firmware. We do this 7133 * only once at first boot. Called from a process context. 7134 */ 7135 static int 7136 iwn5000_query_calibration(struct iwn_softc *sc) 7137 { 7138 struct iwn5000_calib_config cmd; 7139 int error; 7140 7141 memset(&cmd, 0, sizeof cmd); 7142 cmd.ucode.once.enable = htole32(0xffffffff); 7143 cmd.ucode.once.start = htole32(0xffffffff); 7144 cmd.ucode.once.send = htole32(0xffffffff); 7145 cmd.ucode.flags = htole32(0xffffffff); 7146 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", 7147 __func__); 7148 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 7149 if (error != 0) 7150 return error; 7151 7152 /* Wait at most two seconds for calibration to complete. */ 7153 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 7154 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz); 7155 return error; 7156 } 7157 7158 /* 7159 * Send calibration results to the runtime firmware. These results were 7160 * obtained on first boot from the initialization firmware. 7161 */ 7162 static int 7163 iwn5000_send_calibration(struct iwn_softc *sc) 7164 { 7165 int idx, error; 7166 7167 for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) { 7168 if (!(sc->base_params->calib_need & (1<<idx))) { 7169 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7170 "No need of calib %d\n", 7171 idx); 7172 continue; /* no need for this calib */ 7173 } 7174 if (sc->calibcmd[idx].buf == NULL) { 7175 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7176 "Need calib idx : %d but no available data\n", 7177 idx); 7178 continue; 7179 } 7180 7181 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7182 "send calibration result idx=%d len=%d\n", idx, 7183 sc->calibcmd[idx].len); 7184 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 7185 sc->calibcmd[idx].len, 0); 7186 if (error != 0) { 7187 device_printf(sc->sc_dev, 7188 "%s: could not send calibration result, error %d\n", 7189 __func__, error); 7190 return error; 7191 } 7192 } 7193 return 0; 7194 } 7195 7196 static int 7197 iwn5000_send_wimax_coex(struct iwn_softc *sc) 7198 { 7199 struct iwn5000_wimax_coex wimax; 7200 7201 #if 0 7202 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 7203 /* Enable WiMAX coexistence for combo adapters. */ 7204 wimax.flags = 7205 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 7206 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 7207 IWN_WIMAX_COEX_STA_TABLE_VALID | 7208 IWN_WIMAX_COEX_ENABLE; 7209 memcpy(wimax.events, iwn6050_wimax_events, 7210 sizeof iwn6050_wimax_events); 7211 } else 7212 #endif 7213 { 7214 /* Disable WiMAX coexistence. */ 7215 wimax.flags = 0; 7216 memset(wimax.events, 0, sizeof wimax.events); 7217 } 7218 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 7219 __func__); 7220 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 7221 } 7222 7223 static int 7224 iwn5000_crystal_calib(struct iwn_softc *sc) 7225 { 7226 struct iwn5000_phy_calib_crystal cmd; 7227 7228 memset(&cmd, 0, sizeof cmd); 7229 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 7230 cmd.ngroups = 1; 7231 cmd.isvalid = 1; 7232 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 7233 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 7234 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", 7235 cmd.cap_pin[0], cmd.cap_pin[1]); 7236 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7237 } 7238 7239 static int 7240 iwn5000_temp_offset_calib(struct iwn_softc *sc) 7241 { 7242 struct iwn5000_phy_calib_temp_offset cmd; 7243 7244 memset(&cmd, 0, sizeof cmd); 7245 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7246 cmd.ngroups = 1; 7247 cmd.isvalid = 1; 7248 if (sc->eeprom_temp != 0) 7249 cmd.offset = htole16(sc->eeprom_temp); 7250 else 7251 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 7252 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n", 7253 le16toh(cmd.offset)); 7254 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7255 } 7256 7257 static int 7258 iwn5000_temp_offset_calibv2(struct iwn_softc *sc) 7259 { 7260 struct iwn5000_phy_calib_temp_offsetv2 cmd; 7261 7262 memset(&cmd, 0, sizeof cmd); 7263 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7264 cmd.ngroups = 1; 7265 cmd.isvalid = 1; 7266 if (sc->eeprom_temp != 0) { 7267 cmd.offset_low = htole16(sc->eeprom_temp); 7268 cmd.offset_high = htole16(sc->eeprom_temp_high); 7269 } else { 7270 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 7271 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 7272 } 7273 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 7274 7275 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7276 "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n", 7277 le16toh(cmd.offset_low), 7278 le16toh(cmd.offset_high), 7279 le16toh(cmd.burnt_voltage_ref)); 7280 7281 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7282 } 7283 7284 /* 7285 * This function is called after the runtime firmware notifies us of its 7286 * readiness (called in a process context). 7287 */ 7288 static int 7289 iwn4965_post_alive(struct iwn_softc *sc) 7290 { 7291 int error, qid; 7292 7293 if ((error = iwn_nic_lock(sc)) != 0) 7294 return error; 7295 7296 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7297 7298 /* Clear TX scheduler state in SRAM. */ 7299 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 7300 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 7301 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 7302 7303 /* Set physical address of TX scheduler rings (1KB aligned). */ 7304 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 7305 7306 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 7307 7308 /* Disable chain mode for all our 16 queues. */ 7309 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 7310 7311 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 7312 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 7313 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 7314 7315 /* Set scheduler window size. */ 7316 iwn_mem_write(sc, sc->sched_base + 7317 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 7318 /* Set scheduler frame limit. */ 7319 iwn_mem_write(sc, sc->sched_base + 7320 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 7321 IWN_SCHED_LIMIT << 16); 7322 } 7323 7324 /* Enable interrupts for all our 16 queues. */ 7325 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 7326 /* Identify TX FIFO rings (0-7). */ 7327 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 7328 7329 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7330 for (qid = 0; qid < 7; qid++) { 7331 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 7332 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7333 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 7334 } 7335 iwn_nic_unlock(sc); 7336 return 0; 7337 } 7338 7339 /* 7340 * This function is called after the initialization or runtime firmware 7341 * notifies us of its readiness (called in a process context). 7342 */ 7343 static int 7344 iwn5000_post_alive(struct iwn_softc *sc) 7345 { 7346 int error, qid; 7347 7348 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 7349 7350 /* Switch to using ICT interrupt mode. */ 7351 iwn5000_ict_reset(sc); 7352 7353 if ((error = iwn_nic_lock(sc)) != 0){ 7354 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 7355 return error; 7356 } 7357 7358 /* Clear TX scheduler state in SRAM. */ 7359 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 7360 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 7361 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 7362 7363 /* Set physical address of TX scheduler rings (1KB aligned). */ 7364 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 7365 7366 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 7367 7368 /* Enable chain mode for all queues, except command queue. */ 7369 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 7370 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf); 7371 else 7372 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 7373 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 7374 7375 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 7376 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 7377 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 7378 7379 iwn_mem_write(sc, sc->sched_base + 7380 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 7381 /* Set scheduler window size and frame limit. */ 7382 iwn_mem_write(sc, sc->sched_base + 7383 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7384 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7385 } 7386 7387 /* Enable interrupts for all our 20 queues. */ 7388 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 7389 /* Identify TX FIFO rings (0-7). */ 7390 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 7391 7392 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7393 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) { 7394 /* Mark TX rings as active. */ 7395 for (qid = 0; qid < 11; qid++) { 7396 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 }; 7397 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7398 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7399 } 7400 } else { 7401 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7402 for (qid = 0; qid < 7; qid++) { 7403 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 7404 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7405 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7406 } 7407 } 7408 iwn_nic_unlock(sc); 7409 7410 /* Configure WiMAX coexistence for combo adapters. */ 7411 error = iwn5000_send_wimax_coex(sc); 7412 if (error != 0) { 7413 device_printf(sc->sc_dev, 7414 "%s: could not configure WiMAX coexistence, error %d\n", 7415 __func__, error); 7416 return error; 7417 } 7418 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 7419 /* Perform crystal calibration. */ 7420 error = iwn5000_crystal_calib(sc); 7421 if (error != 0) { 7422 device_printf(sc->sc_dev, 7423 "%s: crystal calibration failed, error %d\n", 7424 __func__, error); 7425 return error; 7426 } 7427 } 7428 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 7429 /* Query calibration from the initialization firmware. */ 7430 if ((error = iwn5000_query_calibration(sc)) != 0) { 7431 device_printf(sc->sc_dev, 7432 "%s: could not query calibration, error %d\n", 7433 __func__, error); 7434 return error; 7435 } 7436 /* 7437 * We have the calibration results now, reboot with the 7438 * runtime firmware (call ourselves recursively!) 7439 */ 7440 iwn_hw_stop(sc); 7441 error = iwn_hw_init(sc); 7442 } else { 7443 /* Send calibration results to runtime firmware. */ 7444 error = iwn5000_send_calibration(sc); 7445 } 7446 7447 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 7448 7449 return error; 7450 } 7451 7452 /* 7453 * The firmware boot code is small and is intended to be copied directly into 7454 * the NIC internal memory (no DMA transfer). 7455 */ 7456 static int 7457 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 7458 { 7459 int error, ntries; 7460 7461 size /= sizeof (uint32_t); 7462 7463 if ((error = iwn_nic_lock(sc)) != 0) 7464 return error; 7465 7466 /* Copy microcode image into NIC memory. */ 7467 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 7468 (const uint32_t *)ucode, size); 7469 7470 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 7471 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 7472 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 7473 7474 /* Start boot load now. */ 7475 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 7476 7477 /* Wait for transfer to complete. */ 7478 for (ntries = 0; ntries < 1000; ntries++) { 7479 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 7480 IWN_BSM_WR_CTRL_START)) 7481 break; 7482 DELAY(10); 7483 } 7484 if (ntries == 1000) { 7485 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 7486 __func__); 7487 iwn_nic_unlock(sc); 7488 return ETIMEDOUT; 7489 } 7490 7491 /* Enable boot after power up. */ 7492 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 7493 7494 iwn_nic_unlock(sc); 7495 return 0; 7496 } 7497 7498 static int 7499 iwn4965_load_firmware(struct iwn_softc *sc) 7500 { 7501 struct iwn_fw_info *fw = &sc->fw; 7502 struct iwn_dma_info *dma = &sc->fw_dma; 7503 int error; 7504 7505 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 7506 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 7507 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7508 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 7509 fw->init.text, fw->init.textsz); 7510 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7511 7512 /* Tell adapter where to find initialization sections. */ 7513 if ((error = iwn_nic_lock(sc)) != 0) 7514 return error; 7515 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 7516 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 7517 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 7518 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 7519 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 7520 iwn_nic_unlock(sc); 7521 7522 /* Load firmware boot code. */ 7523 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 7524 if (error != 0) { 7525 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 7526 __func__); 7527 return error; 7528 } 7529 /* Now press "execute". */ 7530 IWN_WRITE(sc, IWN_RESET, 0); 7531 7532 /* Wait at most one second for first alive notification. */ 7533 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { 7534 device_printf(sc->sc_dev, 7535 "%s: timeout waiting for adapter to initialize, error %d\n", 7536 __func__, error); 7537 return error; 7538 } 7539 7540 /* Retrieve current temperature for initial TX power calibration. */ 7541 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 7542 sc->temp = iwn4965_get_temperature(sc); 7543 7544 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 7545 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 7546 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7547 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 7548 fw->main.text, fw->main.textsz); 7549 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7550 7551 /* Tell adapter where to find runtime sections. */ 7552 if ((error = iwn_nic_lock(sc)) != 0) 7553 return error; 7554 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 7555 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 7556 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 7557 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 7558 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 7559 IWN_FW_UPDATED | fw->main.textsz); 7560 iwn_nic_unlock(sc); 7561 7562 return 0; 7563 } 7564 7565 static int 7566 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 7567 const uint8_t *section, int size) 7568 { 7569 struct iwn_dma_info *dma = &sc->fw_dma; 7570 int error; 7571 7572 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7573 7574 /* Copy firmware section into pre-allocated DMA-safe memory. */ 7575 memcpy(dma->vaddr, section, size); 7576 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7577 7578 if ((error = iwn_nic_lock(sc)) != 0) 7579 return error; 7580 7581 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 7582 IWN_FH_TX_CONFIG_DMA_PAUSE); 7583 7584 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 7585 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 7586 IWN_LOADDR(dma->paddr)); 7587 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 7588 IWN_HIADDR(dma->paddr) << 28 | size); 7589 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 7590 IWN_FH_TXBUF_STATUS_TBNUM(1) | 7591 IWN_FH_TXBUF_STATUS_TBIDX(1) | 7592 IWN_FH_TXBUF_STATUS_TFBD_VALID); 7593 7594 /* Kick Flow Handler to start DMA transfer. */ 7595 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 7596 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 7597 7598 iwn_nic_unlock(sc); 7599 7600 /* Wait at most five seconds for FH DMA transfer to complete. */ 7601 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); 7602 } 7603 7604 static int 7605 iwn5000_load_firmware(struct iwn_softc *sc) 7606 { 7607 struct iwn_fw_part *fw; 7608 int error; 7609 7610 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7611 7612 /* Load the initialization firmware on first boot only. */ 7613 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 7614 &sc->fw.main : &sc->fw.init; 7615 7616 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 7617 fw->text, fw->textsz); 7618 if (error != 0) { 7619 device_printf(sc->sc_dev, 7620 "%s: could not load firmware %s section, error %d\n", 7621 __func__, ".text", error); 7622 return error; 7623 } 7624 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 7625 fw->data, fw->datasz); 7626 if (error != 0) { 7627 device_printf(sc->sc_dev, 7628 "%s: could not load firmware %s section, error %d\n", 7629 __func__, ".data", error); 7630 return error; 7631 } 7632 7633 /* Now press "execute". */ 7634 IWN_WRITE(sc, IWN_RESET, 0); 7635 return 0; 7636 } 7637 7638 /* 7639 * Extract text and data sections from a legacy firmware image. 7640 */ 7641 static int 7642 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 7643 { 7644 const uint32_t *ptr; 7645 size_t hdrlen = 24; 7646 uint32_t rev; 7647 7648 ptr = (const uint32_t *)fw->data; 7649 rev = le32toh(*ptr++); 7650 7651 /* Check firmware API version. */ 7652 if (IWN_FW_API(rev) <= 1) { 7653 device_printf(sc->sc_dev, 7654 "%s: bad firmware, need API version >=2\n", __func__); 7655 return EINVAL; 7656 } 7657 if (IWN_FW_API(rev) >= 3) { 7658 /* Skip build number (version 2 header). */ 7659 hdrlen += 4; 7660 ptr++; 7661 } 7662 if (fw->size < hdrlen) { 7663 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7664 __func__, fw->size); 7665 return EINVAL; 7666 } 7667 fw->main.textsz = le32toh(*ptr++); 7668 fw->main.datasz = le32toh(*ptr++); 7669 fw->init.textsz = le32toh(*ptr++); 7670 fw->init.datasz = le32toh(*ptr++); 7671 fw->boot.textsz = le32toh(*ptr++); 7672 7673 /* Check that all firmware sections fit. */ 7674 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 7675 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 7676 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7677 __func__, fw->size); 7678 return EINVAL; 7679 } 7680 7681 /* Get pointers to firmware sections. */ 7682 fw->main.text = (const uint8_t *)ptr; 7683 fw->main.data = fw->main.text + fw->main.textsz; 7684 fw->init.text = fw->main.data + fw->main.datasz; 7685 fw->init.data = fw->init.text + fw->init.textsz; 7686 fw->boot.text = fw->init.data + fw->init.datasz; 7687 return 0; 7688 } 7689 7690 /* 7691 * Extract text and data sections from a TLV firmware image. 7692 */ 7693 static int 7694 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 7695 uint16_t alt) 7696 { 7697 const struct iwn_fw_tlv_hdr *hdr; 7698 const struct iwn_fw_tlv *tlv; 7699 const uint8_t *ptr, *end; 7700 uint64_t altmask; 7701 uint32_t len, tmp; 7702 7703 if (fw->size < sizeof (*hdr)) { 7704 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7705 __func__, fw->size); 7706 return EINVAL; 7707 } 7708 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 7709 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 7710 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 7711 __func__, le32toh(hdr->signature)); 7712 return EINVAL; 7713 } 7714 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 7715 le32toh(hdr->build)); 7716 7717 /* 7718 * Select the closest supported alternative that is less than 7719 * or equal to the specified one. 7720 */ 7721 altmask = le64toh(hdr->altmask); 7722 while (alt > 0 && !(altmask & (1ULL << alt))) 7723 alt--; /* Downgrade. */ 7724 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 7725 7726 ptr = (const uint8_t *)(hdr + 1); 7727 end = (const uint8_t *)(fw->data + fw->size); 7728 7729 /* Parse type-length-value fields. */ 7730 while (ptr + sizeof (*tlv) <= end) { 7731 tlv = (const struct iwn_fw_tlv *)ptr; 7732 len = le32toh(tlv->len); 7733 7734 ptr += sizeof (*tlv); 7735 if (ptr + len > end) { 7736 device_printf(sc->sc_dev, 7737 "%s: firmware too short: %zu bytes\n", __func__, 7738 fw->size); 7739 return EINVAL; 7740 } 7741 /* Skip other alternatives. */ 7742 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 7743 goto next; 7744 7745 switch (le16toh(tlv->type)) { 7746 case IWN_FW_TLV_MAIN_TEXT: 7747 fw->main.text = ptr; 7748 fw->main.textsz = len; 7749 break; 7750 case IWN_FW_TLV_MAIN_DATA: 7751 fw->main.data = ptr; 7752 fw->main.datasz = len; 7753 break; 7754 case IWN_FW_TLV_INIT_TEXT: 7755 fw->init.text = ptr; 7756 fw->init.textsz = len; 7757 break; 7758 case IWN_FW_TLV_INIT_DATA: 7759 fw->init.data = ptr; 7760 fw->init.datasz = len; 7761 break; 7762 case IWN_FW_TLV_BOOT_TEXT: 7763 fw->boot.text = ptr; 7764 fw->boot.textsz = len; 7765 break; 7766 case IWN_FW_TLV_ENH_SENS: 7767 if (!len) 7768 sc->sc_flags |= IWN_FLAG_ENH_SENS; 7769 break; 7770 case IWN_FW_TLV_PHY_CALIB: 7771 tmp = le32toh(*ptr); 7772 if (tmp < 253) { 7773 sc->reset_noise_gain = tmp; 7774 sc->noise_gain = tmp + 1; 7775 } 7776 break; 7777 case IWN_FW_TLV_PAN: 7778 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; 7779 DPRINTF(sc, IWN_DEBUG_RESET, 7780 "PAN Support found: %d\n", 1); 7781 break; 7782 case IWN_FW_TLV_FLAGS: 7783 if (len < sizeof(uint32_t)) 7784 break; 7785 if (len % sizeof(uint32_t)) 7786 break; 7787 sc->tlv_feature_flags = le32toh(*ptr); 7788 DPRINTF(sc, IWN_DEBUG_RESET, 7789 "%s: feature: 0x%08x\n", 7790 __func__, 7791 sc->tlv_feature_flags); 7792 break; 7793 case IWN_FW_TLV_PBREQ_MAXLEN: 7794 case IWN_FW_TLV_RUNT_EVTLOG_PTR: 7795 case IWN_FW_TLV_RUNT_EVTLOG_SIZE: 7796 case IWN_FW_TLV_RUNT_ERRLOG_PTR: 7797 case IWN_FW_TLV_INIT_EVTLOG_PTR: 7798 case IWN_FW_TLV_INIT_EVTLOG_SIZE: 7799 case IWN_FW_TLV_INIT_ERRLOG_PTR: 7800 case IWN_FW_TLV_WOWLAN_INST: 7801 case IWN_FW_TLV_WOWLAN_DATA: 7802 DPRINTF(sc, IWN_DEBUG_RESET, 7803 "TLV type %d reconized but not handled\n", 7804 le16toh(tlv->type)); 7805 break; 7806 default: 7807 DPRINTF(sc, IWN_DEBUG_RESET, 7808 "TLV type %d not handled\n", le16toh(tlv->type)); 7809 break; 7810 } 7811 next: /* TLV fields are 32-bit aligned. */ 7812 ptr += (len + 3) & ~3; 7813 } 7814 return 0; 7815 } 7816 7817 static int 7818 iwn_read_firmware(struct iwn_softc *sc) 7819 { 7820 struct iwn_fw_info *fw = &sc->fw; 7821 int error; 7822 7823 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7824 7825 IWN_UNLOCK(sc); 7826 7827 memset(fw, 0, sizeof (*fw)); 7828 7829 /* Read firmware image from filesystem. */ 7830 sc->fw_fp = firmware_get(sc->fwname); 7831 if (sc->fw_fp == NULL) { 7832 device_printf(sc->sc_dev, "%s: could not read firmware %s\n", 7833 __func__, sc->fwname); 7834 IWN_LOCK(sc); 7835 return EINVAL; 7836 } 7837 IWN_LOCK(sc); 7838 7839 fw->size = sc->fw_fp->datasize; 7840 fw->data = (const uint8_t *)sc->fw_fp->data; 7841 if (fw->size < sizeof (uint32_t)) { 7842 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7843 __func__, fw->size); 7844 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7845 sc->fw_fp = NULL; 7846 return EINVAL; 7847 } 7848 7849 /* Retrieve text and data sections. */ 7850 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 7851 error = iwn_read_firmware_leg(sc, fw); 7852 else 7853 error = iwn_read_firmware_tlv(sc, fw, 1); 7854 if (error != 0) { 7855 device_printf(sc->sc_dev, 7856 "%s: could not read firmware sections, error %d\n", 7857 __func__, error); 7858 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7859 sc->fw_fp = NULL; 7860 return error; 7861 } 7862 7863 /* Make sure text and data sections fit in hardware memory. */ 7864 if (fw->main.textsz > sc->fw_text_maxsz || 7865 fw->main.datasz > sc->fw_data_maxsz || 7866 fw->init.textsz > sc->fw_text_maxsz || 7867 fw->init.datasz > sc->fw_data_maxsz || 7868 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 7869 (fw->boot.textsz & 3) != 0) { 7870 device_printf(sc->sc_dev, "%s: firmware sections too large\n", 7871 __func__); 7872 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7873 sc->fw_fp = NULL; 7874 return EINVAL; 7875 } 7876 7877 /* We can proceed with loading the firmware. */ 7878 return 0; 7879 } 7880 7881 static int 7882 iwn_clock_wait(struct iwn_softc *sc) 7883 { 7884 int ntries; 7885 7886 /* Set "initialization complete" bit. */ 7887 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 7888 7889 /* Wait for clock stabilization. */ 7890 for (ntries = 0; ntries < 2500; ntries++) { 7891 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 7892 return 0; 7893 DELAY(10); 7894 } 7895 device_printf(sc->sc_dev, 7896 "%s: timeout waiting for clock stabilization\n", __func__); 7897 return ETIMEDOUT; 7898 } 7899 7900 static int 7901 iwn_apm_init(struct iwn_softc *sc) 7902 { 7903 uint32_t reg; 7904 int error; 7905 7906 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7907 7908 /* Disable L0s exit timer (NMI bug workaround). */ 7909 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 7910 /* Don't wait for ICH L0s (ICH bug workaround). */ 7911 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 7912 7913 /* Set FH wait threshold to max (HW bug under stress workaround). */ 7914 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 7915 7916 /* Enable HAP INTA to move adapter from L1a to L0s. */ 7917 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 7918 7919 /* Retrieve PCIe Active State Power Management (ASPM). */ 7920 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 7921 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 7922 if (reg & 0x02) /* L1 Entry enabled. */ 7923 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 7924 else 7925 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 7926 7927 if (sc->base_params->pll_cfg_val) 7928 IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val); 7929 7930 /* Wait for clock stabilization before accessing prph. */ 7931 if ((error = iwn_clock_wait(sc)) != 0) 7932 return error; 7933 7934 if ((error = iwn_nic_lock(sc)) != 0) 7935 return error; 7936 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 7937 /* Enable DMA and BSM (Bootstrap State Machine). */ 7938 iwn_prph_write(sc, IWN_APMG_CLK_EN, 7939 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 7940 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 7941 } else { 7942 /* Enable DMA. */ 7943 iwn_prph_write(sc, IWN_APMG_CLK_EN, 7944 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 7945 } 7946 DELAY(20); 7947 /* Disable L1-Active. */ 7948 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 7949 iwn_nic_unlock(sc); 7950 7951 return 0; 7952 } 7953 7954 static void 7955 iwn_apm_stop_master(struct iwn_softc *sc) 7956 { 7957 int ntries; 7958 7959 /* Stop busmaster DMA activity. */ 7960 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 7961 for (ntries = 0; ntries < 100; ntries++) { 7962 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 7963 return; 7964 DELAY(10); 7965 } 7966 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); 7967 } 7968 7969 static void 7970 iwn_apm_stop(struct iwn_softc *sc) 7971 { 7972 iwn_apm_stop_master(sc); 7973 7974 /* Reset the entire device. */ 7975 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 7976 DELAY(10); 7977 /* Clear "initialization complete" bit. */ 7978 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 7979 } 7980 7981 static int 7982 iwn4965_nic_config(struct iwn_softc *sc) 7983 { 7984 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7985 7986 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 7987 /* 7988 * I don't believe this to be correct but this is what the 7989 * vendor driver is doing. Probably the bits should not be 7990 * shifted in IWN_RFCFG_*. 7991 */ 7992 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7993 IWN_RFCFG_TYPE(sc->rfcfg) | 7994 IWN_RFCFG_STEP(sc->rfcfg) | 7995 IWN_RFCFG_DASH(sc->rfcfg)); 7996 } 7997 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7998 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 7999 return 0; 8000 } 8001 8002 static int 8003 iwn5000_nic_config(struct iwn_softc *sc) 8004 { 8005 uint32_t tmp; 8006 int error; 8007 8008 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8009 8010 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 8011 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8012 IWN_RFCFG_TYPE(sc->rfcfg) | 8013 IWN_RFCFG_STEP(sc->rfcfg) | 8014 IWN_RFCFG_DASH(sc->rfcfg)); 8015 } 8016 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 8017 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 8018 8019 if ((error = iwn_nic_lock(sc)) != 0) 8020 return error; 8021 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 8022 8023 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 8024 /* 8025 * Select first Switching Voltage Regulator (1.32V) to 8026 * solve a stability issue related to noisy DC2DC line 8027 * in the silicon of 1000 Series. 8028 */ 8029 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 8030 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 8031 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 8032 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 8033 } 8034 iwn_nic_unlock(sc); 8035 8036 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 8037 /* Use internal power amplifier only. */ 8038 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 8039 } 8040 if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) { 8041 /* Indicate that ROM calibration version is >=6. */ 8042 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 8043 } 8044 if (sc->base_params->additional_gp_drv_bit) 8045 IWN_SETBITS(sc, IWN_GP_DRIVER, 8046 sc->base_params->additional_gp_drv_bit); 8047 return 0; 8048 } 8049 8050 /* 8051 * Take NIC ownership over Intel Active Management Technology (AMT). 8052 */ 8053 static int 8054 iwn_hw_prepare(struct iwn_softc *sc) 8055 { 8056 int ntries; 8057 8058 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8059 8060 /* Check if hardware is ready. */ 8061 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 8062 for (ntries = 0; ntries < 5; ntries++) { 8063 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 8064 IWN_HW_IF_CONFIG_NIC_READY) 8065 return 0; 8066 DELAY(10); 8067 } 8068 8069 /* Hardware not ready, force into ready state. */ 8070 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 8071 for (ntries = 0; ntries < 15000; ntries++) { 8072 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 8073 IWN_HW_IF_CONFIG_PREPARE_DONE)) 8074 break; 8075 DELAY(10); 8076 } 8077 if (ntries == 15000) 8078 return ETIMEDOUT; 8079 8080 /* Hardware should be ready now. */ 8081 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 8082 for (ntries = 0; ntries < 5; ntries++) { 8083 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 8084 IWN_HW_IF_CONFIG_NIC_READY) 8085 return 0; 8086 DELAY(10); 8087 } 8088 return ETIMEDOUT; 8089 } 8090 8091 static int 8092 iwn_hw_init(struct iwn_softc *sc) 8093 { 8094 struct iwn_ops *ops = &sc->ops; 8095 int error, chnl, qid; 8096 8097 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8098 8099 /* Clear pending interrupts. */ 8100 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8101 8102 if ((error = iwn_apm_init(sc)) != 0) { 8103 device_printf(sc->sc_dev, 8104 "%s: could not power ON adapter, error %d\n", __func__, 8105 error); 8106 return error; 8107 } 8108 8109 /* Select VMAIN power source. */ 8110 if ((error = iwn_nic_lock(sc)) != 0) 8111 return error; 8112 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 8113 iwn_nic_unlock(sc); 8114 8115 /* Perform adapter-specific initialization. */ 8116 if ((error = ops->nic_config(sc)) != 0) 8117 return error; 8118 8119 /* Initialize RX ring. */ 8120 if ((error = iwn_nic_lock(sc)) != 0) 8121 return error; 8122 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 8123 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 8124 /* Set physical address of RX ring (256-byte aligned). */ 8125 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 8126 /* Set physical address of RX status (16-byte aligned). */ 8127 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 8128 /* Enable RX. */ 8129 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 8130 IWN_FH_RX_CONFIG_ENA | 8131 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 8132 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 8133 IWN_FH_RX_CONFIG_SINGLE_FRAME | 8134 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 8135 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 8136 iwn_nic_unlock(sc); 8137 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 8138 8139 if ((error = iwn_nic_lock(sc)) != 0) 8140 return error; 8141 8142 /* Initialize TX scheduler. */ 8143 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 8144 8145 /* Set physical address of "keep warm" page (16-byte aligned). */ 8146 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 8147 8148 /* Initialize TX rings. */ 8149 for (qid = 0; qid < sc->ntxqs; qid++) { 8150 struct iwn_tx_ring *txq = &sc->txq[qid]; 8151 8152 /* Set physical address of TX ring (256-byte aligned). */ 8153 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 8154 txq->desc_dma.paddr >> 8); 8155 } 8156 iwn_nic_unlock(sc); 8157 8158 /* Enable DMA channels. */ 8159 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 8160 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 8161 IWN_FH_TX_CONFIG_DMA_ENA | 8162 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 8163 } 8164 8165 /* Clear "radio off" and "commands blocked" bits. */ 8166 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8167 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 8168 8169 /* Clear pending interrupts. */ 8170 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8171 /* Enable interrupt coalescing. */ 8172 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 8173 /* Enable interrupts. */ 8174 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8175 8176 /* _Really_ make sure "radio off" bit is cleared! */ 8177 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8178 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8179 8180 /* Enable shadow registers. */ 8181 if (sc->base_params->shadow_reg_enable) 8182 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 8183 8184 if ((error = ops->load_firmware(sc)) != 0) { 8185 device_printf(sc->sc_dev, 8186 "%s: could not load firmware, error %d\n", __func__, 8187 error); 8188 return error; 8189 } 8190 /* Wait at most one second for firmware alive notification. */ 8191 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { 8192 device_printf(sc->sc_dev, 8193 "%s: timeout waiting for adapter to initialize, error %d\n", 8194 __func__, error); 8195 return error; 8196 } 8197 /* Do post-firmware initialization. */ 8198 8199 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8200 8201 return ops->post_alive(sc); 8202 } 8203 8204 static void 8205 iwn_hw_stop(struct iwn_softc *sc) 8206 { 8207 int chnl, qid, ntries; 8208 8209 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8210 8211 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 8212 8213 /* Disable interrupts. */ 8214 IWN_WRITE(sc, IWN_INT_MASK, 0); 8215 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8216 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 8217 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8218 8219 /* Make sure we no longer hold the NIC lock. */ 8220 iwn_nic_unlock(sc); 8221 8222 /* Stop TX scheduler. */ 8223 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 8224 8225 /* Stop all DMA channels. */ 8226 if (iwn_nic_lock(sc) == 0) { 8227 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 8228 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 8229 for (ntries = 0; ntries < 200; ntries++) { 8230 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 8231 IWN_FH_TX_STATUS_IDLE(chnl)) 8232 break; 8233 DELAY(10); 8234 } 8235 } 8236 iwn_nic_unlock(sc); 8237 } 8238 8239 /* Stop RX ring. */ 8240 iwn_reset_rx_ring(sc, &sc->rxq); 8241 8242 /* Reset all TX rings. */ 8243 for (qid = 0; qid < sc->ntxqs; qid++) 8244 iwn_reset_tx_ring(sc, &sc->txq[qid]); 8245 8246 if (iwn_nic_lock(sc) == 0) { 8247 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 8248 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 8249 iwn_nic_unlock(sc); 8250 } 8251 DELAY(5); 8252 /* Power OFF adapter. */ 8253 iwn_apm_stop(sc); 8254 } 8255 8256 static void 8257 iwn_radio_on(void *arg0, int pending) 8258 { 8259 struct iwn_softc *sc = arg0; 8260 struct ifnet *ifp = sc->sc_ifp; 8261 struct ieee80211com *ic = ifp->if_l2com; 8262 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8263 8264 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8265 8266 if (vap != NULL) { 8267 iwn_init(sc); 8268 ieee80211_init(vap); 8269 } 8270 } 8271 8272 static void 8273 iwn_radio_off(void *arg0, int pending) 8274 { 8275 struct iwn_softc *sc = arg0; 8276 struct ifnet *ifp = sc->sc_ifp; 8277 struct ieee80211com *ic = ifp->if_l2com; 8278 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8279 8280 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8281 8282 iwn_stop(sc); 8283 if (vap != NULL) 8284 ieee80211_stop(vap); 8285 8286 /* Enable interrupts to get RF toggle notification. */ 8287 IWN_LOCK(sc); 8288 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8289 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8290 IWN_UNLOCK(sc); 8291 } 8292 8293 static void 8294 iwn_init_locked(struct iwn_softc *sc) 8295 { 8296 struct ifnet *ifp = sc->sc_ifp; 8297 int error; 8298 8299 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8300 8301 IWN_LOCK_ASSERT(sc); 8302 8303 if ((error = iwn_hw_prepare(sc)) != 0) { 8304 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 8305 __func__, error); 8306 goto fail; 8307 } 8308 8309 /* Initialize interrupt mask to default value. */ 8310 sc->int_mask = IWN_INT_MASK_DEF; 8311 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8312 8313 /* Check that the radio is not disabled by hardware switch. */ 8314 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 8315 device_printf(sc->sc_dev, 8316 "radio is disabled by hardware switch\n"); 8317 /* Enable interrupts to get RF toggle notifications. */ 8318 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8319 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8320 return; 8321 } 8322 8323 /* Read firmware images from the filesystem. */ 8324 if ((error = iwn_read_firmware(sc)) != 0) { 8325 device_printf(sc->sc_dev, 8326 "%s: could not read firmware, error %d\n", __func__, 8327 error); 8328 goto fail; 8329 } 8330 8331 /* Initialize hardware and upload firmware. */ 8332 error = iwn_hw_init(sc); 8333 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8334 sc->fw_fp = NULL; 8335 if (error != 0) { 8336 device_printf(sc->sc_dev, 8337 "%s: could not initialize hardware, error %d\n", __func__, 8338 error); 8339 goto fail; 8340 } 8341 8342 /* Configure adapter now that it is ready. */ 8343 if ((error = iwn_config(sc)) != 0) { 8344 device_printf(sc->sc_dev, 8345 "%s: could not configure device, error %d\n", __func__, 8346 error); 8347 goto fail; 8348 } 8349 8350 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 8351 ifp->if_drv_flags |= IFF_DRV_RUNNING; 8352 8353 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 8354 8355 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8356 8357 return; 8358 8359 fail: iwn_stop_locked(sc); 8360 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 8361 } 8362 8363 static void 8364 iwn_init(void *arg) 8365 { 8366 struct iwn_softc *sc = arg; 8367 struct ifnet *ifp = sc->sc_ifp; 8368 struct ieee80211com *ic = ifp->if_l2com; 8369 8370 IWN_LOCK(sc); 8371 iwn_init_locked(sc); 8372 IWN_UNLOCK(sc); 8373 8374 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 8375 ieee80211_start_all(ic); 8376 } 8377 8378 static void 8379 iwn_stop_locked(struct iwn_softc *sc) 8380 { 8381 struct ifnet *ifp = sc->sc_ifp; 8382 8383 IWN_LOCK_ASSERT(sc); 8384 8385 sc->sc_is_scanning = 0; 8386 sc->sc_tx_timer = 0; 8387 callout_stop(&sc->watchdog_to); 8388 callout_stop(&sc->calib_to); 8389 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 8390 8391 /* Power OFF hardware. */ 8392 iwn_hw_stop(sc); 8393 } 8394 8395 static void 8396 iwn_stop(struct iwn_softc *sc) 8397 { 8398 IWN_LOCK(sc); 8399 iwn_stop_locked(sc); 8400 IWN_UNLOCK(sc); 8401 } 8402 8403 /* 8404 * Callback from net80211 to start a scan. 8405 */ 8406 static void 8407 iwn_scan_start(struct ieee80211com *ic) 8408 { 8409 struct ifnet *ifp = ic->ic_ifp; 8410 struct iwn_softc *sc = ifp->if_softc; 8411 8412 IWN_LOCK(sc); 8413 /* make the link LED blink while we're scanning */ 8414 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 8415 IWN_UNLOCK(sc); 8416 } 8417 8418 /* 8419 * Callback from net80211 to terminate a scan. 8420 */ 8421 static void 8422 iwn_scan_end(struct ieee80211com *ic) 8423 { 8424 struct ifnet *ifp = ic->ic_ifp; 8425 struct iwn_softc *sc = ifp->if_softc; 8426 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8427 8428 IWN_LOCK(sc); 8429 if (vap->iv_state == IEEE80211_S_RUN) { 8430 /* Set link LED to ON status if we are associated */ 8431 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 8432 } 8433 IWN_UNLOCK(sc); 8434 } 8435 8436 /* 8437 * Callback from net80211 to force a channel change. 8438 */ 8439 static void 8440 iwn_set_channel(struct ieee80211com *ic) 8441 { 8442 const struct ieee80211_channel *c = ic->ic_curchan; 8443 struct ifnet *ifp = ic->ic_ifp; 8444 struct iwn_softc *sc = ifp->if_softc; 8445 int error; 8446 8447 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8448 8449 IWN_LOCK(sc); 8450 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 8451 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 8452 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 8453 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 8454 8455 /* 8456 * Only need to set the channel in Monitor mode. AP scanning and auth 8457 * are already taken care of by their respective firmware commands. 8458 */ 8459 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 8460 error = iwn_config(sc); 8461 if (error != 0) 8462 device_printf(sc->sc_dev, 8463 "%s: error %d settting channel\n", __func__, error); 8464 } 8465 IWN_UNLOCK(sc); 8466 } 8467 8468 /* 8469 * Callback from net80211 to start scanning of the current channel. 8470 */ 8471 static void 8472 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 8473 { 8474 struct ieee80211vap *vap = ss->ss_vap; 8475 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 8476 struct ieee80211com *ic = vap->iv_ic; 8477 int error; 8478 8479 IWN_LOCK(sc); 8480 error = iwn_scan(sc, vap, ss, ic->ic_curchan); 8481 IWN_UNLOCK(sc); 8482 if (error != 0) 8483 ieee80211_cancel_scan(vap); 8484 } 8485 8486 /* 8487 * Callback from net80211 to handle the minimum dwell time being met. 8488 * The intent is to terminate the scan but we just let the firmware 8489 * notify us when it's finished as we have no safe way to abort it. 8490 */ 8491 static void 8492 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 8493 { 8494 /* NB: don't try to abort scan; wait for firmware to finish */ 8495 } 8496 8497 static void 8498 iwn_hw_reset(void *arg0, int pending) 8499 { 8500 struct iwn_softc *sc = arg0; 8501 struct ifnet *ifp = sc->sc_ifp; 8502 struct ieee80211com *ic = ifp->if_l2com; 8503 8504 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8505 8506 iwn_stop(sc); 8507 iwn_init(sc); 8508 ieee80211_notify_radio(ic, 1); 8509 } 8510 #ifdef IWN_DEBUG 8511 #define IWN_DESC(x) case x: return #x 8512 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 8513 8514 /* 8515 * Translate CSR code to string 8516 */ 8517 static char *iwn_get_csr_string(int csr) 8518 { 8519 switch (csr) { 8520 IWN_DESC(IWN_HW_IF_CONFIG); 8521 IWN_DESC(IWN_INT_COALESCING); 8522 IWN_DESC(IWN_INT); 8523 IWN_DESC(IWN_INT_MASK); 8524 IWN_DESC(IWN_FH_INT); 8525 IWN_DESC(IWN_GPIO_IN); 8526 IWN_DESC(IWN_RESET); 8527 IWN_DESC(IWN_GP_CNTRL); 8528 IWN_DESC(IWN_HW_REV); 8529 IWN_DESC(IWN_EEPROM); 8530 IWN_DESC(IWN_EEPROM_GP); 8531 IWN_DESC(IWN_OTP_GP); 8532 IWN_DESC(IWN_GIO); 8533 IWN_DESC(IWN_GP_UCODE); 8534 IWN_DESC(IWN_GP_DRIVER); 8535 IWN_DESC(IWN_UCODE_GP1); 8536 IWN_DESC(IWN_UCODE_GP2); 8537 IWN_DESC(IWN_LED); 8538 IWN_DESC(IWN_DRAM_INT_TBL); 8539 IWN_DESC(IWN_GIO_CHICKEN); 8540 IWN_DESC(IWN_ANA_PLL); 8541 IWN_DESC(IWN_HW_REV_WA); 8542 IWN_DESC(IWN_DBG_HPET_MEM); 8543 default: 8544 return "UNKNOWN CSR"; 8545 } 8546 } 8547 8548 /* 8549 * This function print firmware register 8550 */ 8551 static void 8552 iwn_debug_register(struct iwn_softc *sc) 8553 { 8554 int i; 8555 static const uint32_t csr_tbl[] = { 8556 IWN_HW_IF_CONFIG, 8557 IWN_INT_COALESCING, 8558 IWN_INT, 8559 IWN_INT_MASK, 8560 IWN_FH_INT, 8561 IWN_GPIO_IN, 8562 IWN_RESET, 8563 IWN_GP_CNTRL, 8564 IWN_HW_REV, 8565 IWN_EEPROM, 8566 IWN_EEPROM_GP, 8567 IWN_OTP_GP, 8568 IWN_GIO, 8569 IWN_GP_UCODE, 8570 IWN_GP_DRIVER, 8571 IWN_UCODE_GP1, 8572 IWN_UCODE_GP2, 8573 IWN_LED, 8574 IWN_DRAM_INT_TBL, 8575 IWN_GIO_CHICKEN, 8576 IWN_ANA_PLL, 8577 IWN_HW_REV_WA, 8578 IWN_DBG_HPET_MEM, 8579 }; 8580 DPRINTF(sc, IWN_DEBUG_REGISTER, 8581 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 8582 "\n"); 8583 for (i = 0; i < COUNTOF(csr_tbl); i++){ 8584 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 8585 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 8586 if ((i+1) % 3 == 0) 8587 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8588 } 8589 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8590 } 8591 #endif 8592