1 /*- 2 * Copyright (c) 2007-2009 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Copyright (c) 2008 5 * Benjamin Close <benjsc@FreeBSD.org> 6 * Copyright (c) 2008 Sam Leffler, Errno Consulting 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* 22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 23 * adapters. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>

/*
 * One entry per supported PCI device; iwn_probe() matches the device's
 * PCI vendor/device pair against iwn_ident_table below.
 */
struct iwn_ident {
	uint16_t	vendor;		/* PCI vendor ID (Intel, 0x8086) */
	uint16_t	device;		/* PCI device ID */
	const char	*name;		/* description for device_set_desc() */
};

/* Terminated by an all-zero sentinel entry (name == NULL). */
static const struct iwn_ident iwn_ident_table[] = {
	{ 0x8086, 0x0082, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, 0x0083, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, 0x0084, "Intel Centrino Wireless-N 1000" },
	{ 0x8086, 0x0085, "Intel Centrino Advanced-N 6205" },
	{ 0x8086, 0x0087, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, 0x0089, "Intel Centrino Advanced-N + WiMAX 6250" },
	{ 0x8086, 0x008a, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, 0x008b, "Intel Centrino Wireless-N 1030" },
	{ 0x8086, 0x0090, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, 0x0091, "Intel Centrino Advanced-N 6230" },
	{ 0x8086, 0x0885, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, 0x0886, "Intel Centrino Wireless-N + WiMAX 6150" },
	{ 0x8086, 0x0896, "Intel Centrino Wireless-N 130" },
	{ 0x8086, 0x4229, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, 0x422b, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, 0x422c, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, 0x422d, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, 0x4230, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, 0x4232, "Intel WiFi Link 5100" },
	{ 0x8086, 0x4233, "Intel Wireless WiFi Link 4965" },
	{ 0x8086, 0x4235, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, 0x4236, "Intel Ultimate N WiFi Link 5300" },
	{ 0x8086, 0x4237, "Intel WiFi Link 5100" },
	{ 0x8086, 0x4238, "Intel Centrino Ultimate-N 6300" },
	{ 0x8086, 0x4239, "Intel Centrino Advanced-N 6200" },
	{ 0x8086, 0x423a, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, 0x423b, "Intel WiMAX/WiFi Link 5350" },
	{ 0x8086, 0x423c, "Intel WiMAX/WiFi Link 5150" },
	{ 0x8086, 0x423d, "Intel WiMAX/WiFi Link 5150" },
	{ 0, 0, NULL }
};

/* Forward declarations for the driver's static functions. */
static int	iwn_probe(device_t);
static int	iwn_attach(device_t);
static int	iwn4965_attach(struct iwn_softc *, uint16_t);
static int	iwn5000_attach(struct iwn_softc *, uint16_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static void	iwn_sysctlattach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_detach(device_t);
static int	iwn_shutdown(device_t);
static int	iwn_suspend(device_t);
static int	iwn_resume(device_t);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct
iwn_softc *, uint32_t, void *, int); 133 static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 134 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 135 void **, bus_size_t, bus_size_t); 136 static void iwn_dma_contig_free(struct iwn_dma_info *); 137 static int iwn_alloc_sched(struct iwn_softc *); 138 static void iwn_free_sched(struct iwn_softc *); 139 static int iwn_alloc_kw(struct iwn_softc *); 140 static void iwn_free_kw(struct iwn_softc *); 141 static int iwn_alloc_ict(struct iwn_softc *); 142 static void iwn_free_ict(struct iwn_softc *); 143 static int iwn_alloc_fwmem(struct iwn_softc *); 144 static void iwn_free_fwmem(struct iwn_softc *); 145 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 146 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 147 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 148 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 149 int); 150 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 151 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 152 static void iwn5000_ict_reset(struct iwn_softc *); 153 static int iwn_read_eeprom(struct iwn_softc *, 154 uint8_t macaddr[IEEE80211_ADDR_LEN]); 155 static void iwn4965_read_eeprom(struct iwn_softc *); 156 static void iwn4965_print_power_group(struct iwn_softc *, int); 157 static void iwn5000_read_eeprom(struct iwn_softc *); 158 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 159 static void iwn_read_eeprom_band(struct iwn_softc *, int); 160 static void iwn_read_eeprom_ht40(struct iwn_softc *, int); 161 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 162 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 163 struct ieee80211_channel *); 164 static int iwn_setregdomain(struct ieee80211com *, 165 struct ieee80211_regdomain *, int, 166 struct 
ieee80211_channel[]); 167 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 168 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 169 const uint8_t mac[IEEE80211_ADDR_LEN]); 170 static void iwn_newassoc(struct ieee80211_node *, int); 171 static int iwn_media_change(struct ifnet *); 172 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 173 static void iwn_calib_timeout(void *); 174 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 175 struct iwn_rx_data *); 176 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 177 struct iwn_rx_data *); 178 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 179 struct iwn_rx_data *); 180 static void iwn5000_rx_calib_results(struct iwn_softc *, 181 struct iwn_rx_desc *, struct iwn_rx_data *); 182 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 183 struct iwn_rx_data *); 184 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 185 struct iwn_rx_data *); 186 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 187 struct iwn_rx_data *); 188 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 189 uint8_t); 190 static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *); 191 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 192 static void iwn_notif_intr(struct iwn_softc *); 193 static void iwn_wakeup_intr(struct iwn_softc *); 194 static void iwn_rftoggle_intr(struct iwn_softc *); 195 static void iwn_fatal_intr(struct iwn_softc *); 196 static void iwn_intr(void *); 197 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 198 uint16_t); 199 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 200 uint16_t); 201 #ifdef notyet 202 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 203 #endif 204 static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 205 struct ieee80211_node *); 
206 static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, 207 struct ieee80211_node *, 208 const struct ieee80211_bpf_params *params); 209 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 210 const struct ieee80211_bpf_params *); 211 static void iwn_start(struct ifnet *); 212 static void iwn_start_locked(struct ifnet *); 213 static void iwn_watchdog(void *); 214 static int iwn_ioctl(struct ifnet *, u_long, caddr_t); 215 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 216 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 217 int); 218 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 219 int); 220 static int iwn_set_link_quality(struct iwn_softc *, 221 struct ieee80211_node *); 222 static int iwn_add_broadcast_node(struct iwn_softc *, int); 223 static int iwn_updateedca(struct ieee80211com *); 224 static void iwn_update_mcast(struct ifnet *); 225 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 226 static int iwn_set_critical_temp(struct iwn_softc *); 227 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 228 static void iwn4965_power_calibration(struct iwn_softc *, int); 229 static int iwn4965_set_txpower(struct iwn_softc *, 230 struct ieee80211_channel *, int); 231 static int iwn5000_set_txpower(struct iwn_softc *, 232 struct ieee80211_channel *, int); 233 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 234 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 235 static int iwn_get_noise(const struct iwn_rx_general_stats *); 236 static int iwn4965_get_temperature(struct iwn_softc *); 237 static int iwn5000_get_temperature(struct iwn_softc *); 238 static int iwn_init_sensitivity(struct iwn_softc *); 239 static void iwn_collect_noise(struct iwn_softc *, 240 const struct iwn_rx_general_stats *); 241 static int iwn4965_init_gains(struct iwn_softc *); 242 static int iwn5000_init_gains(struct 
iwn_softc *); 243 static int iwn4965_set_gains(struct iwn_softc *); 244 static int iwn5000_set_gains(struct iwn_softc *); 245 static void iwn_tune_sensitivity(struct iwn_softc *, 246 const struct iwn_rx_stats *); 247 static int iwn_send_sensitivity(struct iwn_softc *); 248 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 249 static int iwn_send_btcoex(struct iwn_softc *); 250 static int iwn_send_advanced_btcoex(struct iwn_softc *); 251 static int iwn5000_runtime_calib(struct iwn_softc *); 252 static int iwn_config(struct iwn_softc *); 253 static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int); 254 static int iwn_scan(struct iwn_softc *); 255 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 256 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 257 static int iwn_ampdu_rx_start(struct ieee80211_node *, 258 struct ieee80211_rx_ampdu *, int, int, int); 259 static void iwn_ampdu_rx_stop(struct ieee80211_node *, 260 struct ieee80211_rx_ampdu *); 261 static int iwn_addba_request(struct ieee80211_node *, 262 struct ieee80211_tx_ampdu *, int, int, int); 263 static int iwn_addba_response(struct ieee80211_node *, 264 struct ieee80211_tx_ampdu *, int, int, int); 265 static int iwn_ampdu_tx_start(struct ieee80211com *, 266 struct ieee80211_node *, uint8_t); 267 static void iwn_ampdu_tx_stop(struct ieee80211_node *, 268 struct ieee80211_tx_ampdu *); 269 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 270 struct ieee80211_node *, int, uint8_t, uint16_t); 271 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, 272 uint8_t, uint16_t); 273 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 274 struct ieee80211_node *, int, uint8_t, uint16_t); 275 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, 276 uint8_t, uint16_t); 277 static int iwn5000_query_calibration(struct iwn_softc *); 278 static int iwn5000_send_calibration(struct iwn_softc *); 279 static int iwn5000_send_wimax_coex(struct iwn_softc 
		    *);
static int	iwn5000_crystal_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calib(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static void	iwn_radio_on(void *, int);
static void	iwn_radio_off(void *, int);
static void	iwn_init_locked(struct iwn_softc *);
static void	iwn_init(void *);
static void	iwn_stop_locked(struct iwn_softc *);
static void	iwn_stop(struct iwn_softc *);
static void	iwn_scan_start(struct ieee80211com *);
static void	iwn_scan_end(struct ieee80211com *);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
static void	iwn_hw_reset(void *, int);

#define IWN_DEBUG
#ifdef IWN_DEBUG
/*
 * Debug category bits for the sc_debug mask (settable through the
 * dev.iwn.N.debug sysctl installed by iwn_sysctlattach()).
 */
enum {
	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
	IWN_DEBUG_BEACON	= 0x00000040,	/* beacon handling */
	IWN_DEBUG_WATCHDOG	= 0x00000080,	/* watchdog timeout */
	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
	IWN_DEBUG_LED		= 0x00000800,	/* led management */
	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	IWN_DEBUG_ANY		= 0xffffffff
};

/* Print only when one of the bits in (m) is set in sc->sc_debug. */
#define DPRINTF(sc, m, fmt, ...) do {			\
	if (sc->sc_debug & (m))				\
		printf(fmt, __VA_ARGS__);		\
} while (0)

/*
 * Return a printable name for a firmware notification/command code;
 * debugging aid used from the interrupt/notification path.
 */
static const char *
iwn_intr_str(uint8_t cmd)
{
	switch (cmd) {
	/* Notifications */
	case IWN_UC_READY:		return "UC_READY";
	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
	case IWN_TX_DONE:		return "TX_DONE";
	case IWN_START_SCAN:		return "START_SCAN";
	case IWN_STOP_SCAN:		return "STOP_SCAN";
	case IWN_RX_STATISTICS:		return "RX_STATS";
	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
	case IWN_RX_PHY:		return "RX_PHY";
	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
	case IWN_RX_DONE:		return "RX_DONE";

	/* Command Notifications */
	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
	}
	return "UNKNOWN INTR NOTIF/CMD";
}
#else
/* Debugging disabled: evaluate nothing but keep the arguments used. */
#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
#endif

static device_method_t iwn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwn_probe),
	DEVMETHOD(device_attach,	iwn_attach),
	DEVMETHOD(device_detach,	iwn_detach),
	DEVMETHOD(device_shutdown,	iwn_shutdown),
	DEVMETHOD(device_suspend,	iwn_suspend),
	DEVMETHOD(device_resume,	iwn_resume),
	{ 0, 0 }
};

static driver_t iwn_driver = {
	"iwn",
	iwn_methods,
	sizeof(struct iwn_softc)
};
static devclass_t iwn_devclass;

DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);

MODULE_VERSION(iwn, 1);

MODULE_DEPEND(iwn, firmware, 1, 1, 1);
MODULE_DEPEND(iwn, pci, 1, 1, 1);
MODULE_DEPEND(iwn, wlan, 1, 1, 1);

/*
 * Probe method: match the PCI vendor/device pair against
 * iwn_ident_table and set the device description on success.
 */
static int
iwn_probe(device_t dev)
{
	const struct iwn_ident *ident;

	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
		if (pci_get_vendor(dev) == ident->vendor &&
		    pci_get_device(dev) == ident->device) {
			device_set_desc(dev, ident->name);
			return 0;
		}
	}
	return ENXIO;
}
428 429 static int 430 iwn_attach(device_t dev) 431 { 432 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 433 struct ieee80211com *ic; 434 struct ifnet *ifp; 435 uint32_t reg; 436 int i, error, result; 437 uint8_t macaddr[IEEE80211_ADDR_LEN]; 438 439 sc->sc_dev = dev; 440 441 /* 442 * Get the offset of the PCI Express Capability Structure in PCI 443 * Configuration Space. 444 */ 445 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 446 if (error != 0) { 447 device_printf(dev, "PCIe capability structure not found!\n"); 448 return error; 449 } 450 451 /* Clear device-specific "PCI retry timeout" register (41h). */ 452 pci_write_config(dev, 0x41, 0, 1); 453 454 /* Hardware bug workaround. */ 455 reg = pci_read_config(dev, PCIR_COMMAND, 1); 456 if (reg & PCIM_CMD_INTxDIS) { 457 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 458 __func__); 459 reg &= ~PCIM_CMD_INTxDIS; 460 pci_write_config(dev, PCIR_COMMAND, reg, 1); 461 } 462 463 /* Enable bus-mastering. */ 464 pci_enable_busmaster(dev); 465 466 sc->mem_rid = PCIR_BAR(0); 467 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 468 RF_ACTIVE); 469 if (sc->mem == NULL) { 470 device_printf(dev, "can't map mem space\n"); 471 error = ENOMEM; 472 return error; 473 } 474 sc->sc_st = rman_get_bustag(sc->mem); 475 sc->sc_sh = rman_get_bushandle(sc->mem); 476 477 sc->irq_rid = 0; 478 if ((result = pci_msi_count(dev)) == 1 && 479 pci_alloc_msi(dev, &result) == 0) 480 sc->irq_rid = 1; 481 /* Install interrupt handler. */ 482 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, 483 RF_ACTIVE | RF_SHAREABLE); 484 if (sc->irq == NULL) { 485 device_printf(dev, "can't map interrupt\n"); 486 error = ENOMEM; 487 goto fail; 488 } 489 490 IWN_LOCK_INIT(sc); 491 492 /* Read hardware revision and attach. 
*/ 493 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf; 494 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 495 error = iwn4965_attach(sc, pci_get_device(dev)); 496 else 497 error = iwn5000_attach(sc, pci_get_device(dev)); 498 if (error != 0) { 499 device_printf(dev, "could not attach device, error %d\n", 500 error); 501 goto fail; 502 } 503 504 if ((error = iwn_hw_prepare(sc)) != 0) { 505 device_printf(dev, "hardware not ready, error %d\n", error); 506 goto fail; 507 } 508 509 /* Allocate DMA memory for firmware transfers. */ 510 if ((error = iwn_alloc_fwmem(sc)) != 0) { 511 device_printf(dev, 512 "could not allocate memory for firmware, error %d\n", 513 error); 514 goto fail; 515 } 516 517 /* Allocate "Keep Warm" page. */ 518 if ((error = iwn_alloc_kw(sc)) != 0) { 519 device_printf(dev, 520 "could not allocate keep warm page, error %d\n", error); 521 goto fail; 522 } 523 524 /* Allocate ICT table for 5000 Series. */ 525 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 526 (error = iwn_alloc_ict(sc)) != 0) { 527 device_printf(dev, "could not allocate ICT table, error %d\n", 528 error); 529 goto fail; 530 } 531 532 /* Allocate TX scheduler "rings". */ 533 if ((error = iwn_alloc_sched(sc)) != 0) { 534 device_printf(dev, 535 "could not allocate TX scheduler rings, error %d\n", error); 536 goto fail; 537 } 538 539 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 540 for (i = 0; i < sc->ntxqs; i++) { 541 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 542 device_printf(dev, 543 "could not allocate TX ring %d, error %d\n", i, 544 error); 545 goto fail; 546 } 547 } 548 549 /* Allocate RX ring. */ 550 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 551 device_printf(dev, "could not allocate RX ring, error %d\n", 552 error); 553 goto fail; 554 } 555 556 /* Clear pending interrupts. 
*/ 557 IWN_WRITE(sc, IWN_INT, 0xffffffff); 558 559 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 560 if (ifp == NULL) { 561 device_printf(dev, "can not allocate ifnet structure\n"); 562 goto fail; 563 } 564 565 ic = ifp->if_l2com; 566 ic->ic_ifp = ifp; 567 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 568 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 569 570 /* Set device capabilities. */ 571 ic->ic_caps = 572 IEEE80211_C_STA /* station mode supported */ 573 | IEEE80211_C_MONITOR /* monitor mode supported */ 574 | IEEE80211_C_BGSCAN /* background scanning */ 575 | IEEE80211_C_TXPMGT /* tx power management */ 576 | IEEE80211_C_SHSLOT /* short slot time supported */ 577 | IEEE80211_C_WPA 578 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 579 #if 0 580 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 581 #endif 582 | IEEE80211_C_WME /* WME */ 583 ; 584 585 /* Read MAC address, channels, etc from EEPROM. */ 586 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 587 device_printf(dev, "could not read EEPROM, error %d\n", 588 error); 589 goto fail; 590 } 591 592 /* Count the number of available chains. 
*/ 593 sc->ntxchains = 594 ((sc->txchainmask >> 2) & 1) + 595 ((sc->txchainmask >> 1) & 1) + 596 ((sc->txchainmask >> 0) & 1); 597 sc->nrxchains = 598 ((sc->rxchainmask >> 2) & 1) + 599 ((sc->rxchainmask >> 1) & 1) + 600 ((sc->rxchainmask >> 0) & 1); 601 if (bootverbose) { 602 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", 603 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 604 macaddr, ":"); 605 } 606 607 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 608 ic->ic_rxstream = sc->nrxchains; 609 ic->ic_txstream = sc->ntxchains; 610 ic->ic_htcaps = 611 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 612 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 613 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 614 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 615 #ifdef notyet 616 | IEEE80211_HTCAP_GREENFIELD 617 #if IWN_RBUF_SIZE == 8192 618 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 619 #else 620 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 621 #endif 622 #endif 623 /* s/w capabilities */ 624 | IEEE80211_HTC_HT /* HT operation */ 625 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 626 #ifdef notyet 627 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 628 #endif 629 ; 630 } 631 632 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 633 ifp->if_softc = sc; 634 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 635 ifp->if_init = iwn_init; 636 ifp->if_ioctl = iwn_ioctl; 637 ifp->if_start = iwn_start; 638 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 639 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 640 IFQ_SET_READY(&ifp->if_snd); 641 642 ieee80211_ifattach(ic, macaddr); 643 ic->ic_vap_create = iwn_vap_create; 644 ic->ic_vap_delete = iwn_vap_delete; 645 ic->ic_raw_xmit = iwn_raw_xmit; 646 ic->ic_node_alloc = iwn_node_alloc; 647 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 648 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 649 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 650 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 651 sc->sc_addba_request = 
ic->ic_addba_request; 652 ic->ic_addba_request = iwn_addba_request; 653 sc->sc_addba_response = ic->ic_addba_response; 654 ic->ic_addba_response = iwn_addba_response; 655 sc->sc_addba_stop = ic->ic_addba_stop; 656 ic->ic_addba_stop = iwn_ampdu_tx_stop; 657 ic->ic_newassoc = iwn_newassoc; 658 ic->ic_wme.wme_update = iwn_updateedca; 659 ic->ic_update_mcast = iwn_update_mcast; 660 ic->ic_scan_start = iwn_scan_start; 661 ic->ic_scan_end = iwn_scan_end; 662 ic->ic_set_channel = iwn_set_channel; 663 ic->ic_scan_curchan = iwn_scan_curchan; 664 ic->ic_scan_mindwell = iwn_scan_mindwell; 665 ic->ic_setregdomain = iwn_setregdomain; 666 667 iwn_radiotap_attach(sc); 668 669 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); 670 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); 671 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc); 672 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); 673 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); 674 675 iwn_sysctlattach(sc); 676 677 /* 678 * Hook our interrupt after all initialization is complete. 
679 */ 680 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 681 NULL, iwn_intr, sc, &sc->sc_ih); 682 if (error != 0) { 683 device_printf(dev, "can't establish interrupt, error %d\n", 684 error); 685 goto fail; 686 } 687 688 if (bootverbose) 689 ieee80211_announce(ic); 690 return 0; 691 fail: 692 iwn_detach(dev); 693 return error; 694 } 695 696 static int 697 iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 698 { 699 struct iwn_ops *ops = &sc->ops; 700 701 ops->load_firmware = iwn4965_load_firmware; 702 ops->read_eeprom = iwn4965_read_eeprom; 703 ops->post_alive = iwn4965_post_alive; 704 ops->nic_config = iwn4965_nic_config; 705 ops->update_sched = iwn4965_update_sched; 706 ops->get_temperature = iwn4965_get_temperature; 707 ops->get_rssi = iwn4965_get_rssi; 708 ops->set_txpower = iwn4965_set_txpower; 709 ops->init_gains = iwn4965_init_gains; 710 ops->set_gains = iwn4965_set_gains; 711 ops->add_node = iwn4965_add_node; 712 ops->tx_done = iwn4965_tx_done; 713 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 714 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 715 sc->ntxqs = IWN4965_NTXQUEUES; 716 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; 717 sc->ndmachnls = IWN4965_NDMACHNLS; 718 sc->broadcast_id = IWN4965_ID_BROADCAST; 719 sc->rxonsz = IWN4965_RXONSZ; 720 sc->schedsz = IWN4965_SCHEDSZ; 721 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 722 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 723 sc->fwsz = IWN4965_FWSZ; 724 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 725 sc->limits = &iwn4965_sensitivity_limits; 726 sc->fwname = "iwn4965fw"; 727 /* Override chains masks, ROM is known to be broken. 
*/ 728 sc->txchainmask = IWN_ANT_AB; 729 sc->rxchainmask = IWN_ANT_ABC; 730 731 return 0; 732 } 733 734 static int 735 iwn5000_attach(struct iwn_softc *sc, uint16_t pid) 736 { 737 struct iwn_ops *ops = &sc->ops; 738 739 ops->load_firmware = iwn5000_load_firmware; 740 ops->read_eeprom = iwn5000_read_eeprom; 741 ops->post_alive = iwn5000_post_alive; 742 ops->nic_config = iwn5000_nic_config; 743 ops->update_sched = iwn5000_update_sched; 744 ops->get_temperature = iwn5000_get_temperature; 745 ops->get_rssi = iwn5000_get_rssi; 746 ops->set_txpower = iwn5000_set_txpower; 747 ops->init_gains = iwn5000_init_gains; 748 ops->set_gains = iwn5000_set_gains; 749 ops->add_node = iwn5000_add_node; 750 ops->tx_done = iwn5000_tx_done; 751 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 752 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 753 sc->ntxqs = IWN5000_NTXQUEUES; 754 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; 755 sc->ndmachnls = IWN5000_NDMACHNLS; 756 sc->broadcast_id = IWN5000_ID_BROADCAST; 757 sc->rxonsz = IWN5000_RXONSZ; 758 sc->schedsz = IWN5000_SCHEDSZ; 759 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 760 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 761 sc->fwsz = IWN5000_FWSZ; 762 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 763 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 764 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 765 766 switch (sc->hw_type) { 767 case IWN_HW_REV_TYPE_5100: 768 sc->limits = &iwn5000_sensitivity_limits; 769 sc->fwname = "iwn5000fw"; 770 /* Override chains masks, ROM is known to be broken. 
*/ 771 sc->txchainmask = IWN_ANT_B; 772 sc->rxchainmask = IWN_ANT_AB; 773 break; 774 case IWN_HW_REV_TYPE_5150: 775 sc->limits = &iwn5150_sensitivity_limits; 776 sc->fwname = "iwn5150fw"; 777 break; 778 case IWN_HW_REV_TYPE_5300: 779 case IWN_HW_REV_TYPE_5350: 780 sc->limits = &iwn5000_sensitivity_limits; 781 sc->fwname = "iwn5000fw"; 782 break; 783 case IWN_HW_REV_TYPE_1000: 784 sc->limits = &iwn1000_sensitivity_limits; 785 sc->fwname = "iwn1000fw"; 786 break; 787 case IWN_HW_REV_TYPE_6000: 788 sc->limits = &iwn6000_sensitivity_limits; 789 sc->fwname = "iwn6000fw"; 790 if (pid == 0x422c || pid == 0x4239) { 791 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 792 /* Override chains masks, ROM is known to be broken. */ 793 sc->txchainmask = IWN_ANT_BC; 794 sc->rxchainmask = IWN_ANT_BC; 795 } 796 break; 797 case IWN_HW_REV_TYPE_6050: 798 sc->limits = &iwn6000_sensitivity_limits; 799 sc->fwname = "iwn6050fw"; 800 /* Override chains masks, ROM is known to be broken. */ 801 sc->txchainmask = IWN_ANT_AB; 802 sc->rxchainmask = IWN_ANT_AB; 803 break; 804 case IWN_HW_REV_TYPE_6005: 805 sc->limits = &iwn6000_sensitivity_limits; 806 if (pid != 0x0082 && pid != 0x0085) { 807 sc->fwname = "iwn6000g2bfw"; 808 sc->sc_flags |= IWN_FLAG_ADV_BTCOEX; 809 } else 810 sc->fwname = "iwn6000g2afw"; 811 break; 812 default: 813 device_printf(sc->sc_dev, "adapter type %d not supported\n", 814 sc->hw_type); 815 return ENOTSUP; 816 } 817 return 0; 818 } 819 820 /* 821 * Attach the interface to 802.11 radiotap. 
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWN_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWN_RX_RADIOTAP_PRESENT);
}

/*
 * Attach the per-device sysctl subtree (debug knob only, under IWN_DEBUG).
 */
static void
iwn_sysctlattach(struct iwn_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

#ifdef IWN_DEBUG
	sc->sc_debug = 0;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
#endif
}

/*
 * net80211 vap_create method: allocate and set up a vap.  The hardware
 * supports a single virtual interface at a time.
 */
static struct ieee80211vap *
iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwn_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;
	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (ivp == NULL)
		return NULL;
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods; save the old newstate for chaining. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwn_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
	ic->ic_opmode = opmode;
	return vap;
}

/* net80211 vap_delete method: tear down rate control and free the vap. */
static void
iwn_vap_delete(struct ieee80211vap *vap)
{
	struct iwn_vap *ivp = IWN_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

/*
 * Detach the device: drain deferred tasks, stop the hardware and release
 * interrupt, DMA and bus resources (reverse of attach).
 */
static int
iwn_detach(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic;
	int qid;

	if (ifp != NULL) {
		ic = ifp->if_l2com;

		ieee80211_draintask(ic, &sc->sc_reinit_task);
		ieee80211_draintask(ic, &sc->sc_radioon_task);
		ieee80211_draintask(ic, &sc->sc_radiooff_task);

		iwn_stop(sc);
		callout_drain(&sc->watchdog_to);
		callout_drain(&sc->calib_to);
		ieee80211_ifdetach(ic);
	}

	/* Uninstall interrupt handler. */
	if (sc->irq != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
		/* rid 1 means MSI was set up at attach time. */
		if (sc->irq_rid == 1)
			pci_release_msi(dev);
	}

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	if (ifp != NULL)
		if_free(ifp);

	IWN_LOCK_DESTROY(sc);
	return 0;
}

/* Quiesce the hardware on system shutdown. */
static int
iwn_shutdown(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	iwn_stop(sc);
	return 0;
}

/* Suspend: let net80211 bring all vaps down. */
static int
iwn_suspend(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	ieee80211_suspend_all(ic);
	return 0;
}

/* Resume: undo the PCI quirk and let net80211 bring the vaps back up. */
static int
iwn_resume(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	ieee80211_resume_all(ic);
	return 0;
}

/*
 * Grab exclusive access to the NIC (keeps the MAC awake while the host
 * pokes at its registers).  Returns 0 on success or ETIMEDOUT.
 */
static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock.
*/ 978 for (ntries = 0; ntries < 1000; ntries++) { 979 if ((IWN_READ(sc, IWN_GP_CNTRL) & 980 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 981 IWN_GP_CNTRL_MAC_ACCESS_ENA) 982 return 0; 983 DELAY(10); 984 } 985 return ETIMEDOUT; 986 } 987 988 static __inline void 989 iwn_nic_unlock(struct iwn_softc *sc) 990 { 991 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 992 } 993 994 static __inline uint32_t 995 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 996 { 997 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 998 IWN_BARRIER_READ_WRITE(sc); 999 return IWN_READ(sc, IWN_PRPH_RDATA); 1000 } 1001 1002 static __inline void 1003 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1004 { 1005 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 1006 IWN_BARRIER_WRITE(sc); 1007 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 1008 } 1009 1010 static __inline void 1011 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1012 { 1013 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 1014 } 1015 1016 static __inline void 1017 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1018 { 1019 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 1020 } 1021 1022 static __inline void 1023 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 1024 const uint32_t *data, int count) 1025 { 1026 for (; count > 0; count--, data++, addr += 4) 1027 iwn_prph_write(sc, addr, *data); 1028 } 1029 1030 static __inline uint32_t 1031 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 1032 { 1033 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 1034 IWN_BARRIER_READ_WRITE(sc); 1035 return IWN_READ(sc, IWN_MEM_RDATA); 1036 } 1037 1038 static __inline void 1039 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1040 { 1041 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 1042 IWN_BARRIER_WRITE(sc); 1043 IWN_WRITE(sc, IWN_MEM_WDATA, data); 1044 } 1045 1046 static __inline void 1047 iwn_mem_write_2(struct iwn_softc *sc, uint32_t 
addr, uint16_t data) 1048 { 1049 uint32_t tmp; 1050 1051 tmp = iwn_mem_read(sc, addr & ~3); 1052 if (addr & 3) 1053 tmp = (tmp & 0x0000ffff) | data << 16; 1054 else 1055 tmp = (tmp & 0xffff0000) | data; 1056 iwn_mem_write(sc, addr & ~3, tmp); 1057 } 1058 1059 static __inline void 1060 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 1061 int count) 1062 { 1063 for (; count > 0; count--, addr += 4) 1064 *data++ = iwn_mem_read(sc, addr); 1065 } 1066 1067 static __inline void 1068 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1069 int count) 1070 { 1071 for (; count > 0; count--, addr += 4) 1072 iwn_mem_write(sc, addr, val); 1073 } 1074 1075 static int 1076 iwn_eeprom_lock(struct iwn_softc *sc) 1077 { 1078 int i, ntries; 1079 1080 for (i = 0; i < 100; i++) { 1081 /* Request exclusive access to EEPROM. */ 1082 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1083 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1084 1085 /* Spin until we actually get the lock. */ 1086 for (ntries = 0; ntries < 100; ntries++) { 1087 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1088 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1089 return 0; 1090 DELAY(10); 1091 } 1092 } 1093 return ETIMEDOUT; 1094 } 1095 1096 static __inline void 1097 iwn_eeprom_unlock(struct iwn_softc *sc) 1098 { 1099 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1100 } 1101 1102 /* 1103 * Initialize access by host to One Time Programmable ROM. 1104 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1105 */ 1106 static int 1107 iwn_init_otprom(struct iwn_softc *sc) 1108 { 1109 uint16_t prev, base, next; 1110 int count, error; 1111 1112 /* Wait for clock stabilization before accessing prph. 
*/ 1113 if ((error = iwn_clock_wait(sc)) != 0) 1114 return error; 1115 1116 if ((error = iwn_nic_lock(sc)) != 0) 1117 return error; 1118 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1119 DELAY(5); 1120 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1121 iwn_nic_unlock(sc); 1122 1123 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1124 if (sc->hw_type != IWN_HW_REV_TYPE_1000) { 1125 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1126 IWN_RESET_LINK_PWR_MGMT_DIS); 1127 } 1128 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1129 /* Clear ECC status. */ 1130 IWN_SETBITS(sc, IWN_OTP_GP, 1131 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1132 1133 /* 1134 * Find the block before last block (contains the EEPROM image) 1135 * for HW without OTP shadow RAM. 1136 */ 1137 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 1138 /* Switch to absolute addressing mode. */ 1139 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1140 base = prev = 0; 1141 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) { 1142 error = iwn_read_prom_data(sc, base, &next, 2); 1143 if (error != 0) 1144 return error; 1145 if (next == 0) /* End of linked-list. */ 1146 break; 1147 prev = base; 1148 base = le16toh(next); 1149 } 1150 if (count == 0 || count == IWN1000_OTP_NBLOCKS) 1151 return EIO; 1152 /* Skip "next" word. 
*/ 1153 sc->prom_base = prev + 1; 1154 } 1155 return 0; 1156 } 1157 1158 static int 1159 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1160 { 1161 uint8_t *out = data; 1162 uint32_t val, tmp; 1163 int ntries; 1164 1165 addr += sc->prom_base; 1166 for (; count > 0; count -= 2, addr++) { 1167 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1168 for (ntries = 0; ntries < 10; ntries++) { 1169 val = IWN_READ(sc, IWN_EEPROM); 1170 if (val & IWN_EEPROM_READ_VALID) 1171 break; 1172 DELAY(5); 1173 } 1174 if (ntries == 10) { 1175 device_printf(sc->sc_dev, 1176 "timeout reading ROM at 0x%x\n", addr); 1177 return ETIMEDOUT; 1178 } 1179 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1180 /* OTPROM, check for ECC errors. */ 1181 tmp = IWN_READ(sc, IWN_OTP_GP); 1182 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1183 device_printf(sc->sc_dev, 1184 "OTPROM ECC error at 0x%x\n", addr); 1185 return EIO; 1186 } 1187 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1188 /* Correctable ECC error, clear bit. 
*/ 1189 IWN_SETBITS(sc, IWN_OTP_GP, 1190 IWN_OTP_GP_ECC_CORR_STTS); 1191 } 1192 } 1193 *out++ = val >> 16; 1194 if (count > 1) 1195 *out++ = val >> 24; 1196 } 1197 return 0; 1198 } 1199 1200 static void 1201 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1202 { 1203 if (error != 0) 1204 return; 1205 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1206 *(bus_addr_t *)arg = segs[0].ds_addr; 1207 } 1208 1209 static int 1210 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1211 void **kvap, bus_size_t size, bus_size_t alignment) 1212 { 1213 int error; 1214 1215 dma->tag = NULL; 1216 dma->size = size; 1217 1218 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1219 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1220 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 1221 if (error != 0) 1222 goto fail; 1223 1224 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1225 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 1226 if (error != 0) 1227 goto fail; 1228 1229 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 1230 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 1231 if (error != 0) 1232 goto fail; 1233 1234 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 1235 1236 if (kvap != NULL) 1237 *kvap = dma->vaddr; 1238 1239 return 0; 1240 1241 fail: iwn_dma_contig_free(dma); 1242 return error; 1243 } 1244 1245 static void 1246 iwn_dma_contig_free(struct iwn_dma_info *dma) 1247 { 1248 if (dma->map != NULL) { 1249 if (dma->vaddr != NULL) { 1250 bus_dmamap_sync(dma->tag, dma->map, 1251 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1252 bus_dmamap_unload(dma->tag, dma->map); 1253 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map); 1254 dma->vaddr = NULL; 1255 } 1256 bus_dmamap_destroy(dma->tag, dma->map); 1257 dma->map = NULL; 1258 } 1259 if (dma->tag != NULL) { 1260 bus_dma_tag_destroy(dma->tag); 1261 dma->tag = NULL; 1262 } 1263 } 1264 
1265 static int 1266 iwn_alloc_sched(struct iwn_softc *sc) 1267 { 1268 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1269 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, 1270 sc->schedsz, 1024); 1271 } 1272 1273 static void 1274 iwn_free_sched(struct iwn_softc *sc) 1275 { 1276 iwn_dma_contig_free(&sc->sched_dma); 1277 } 1278 1279 static int 1280 iwn_alloc_kw(struct iwn_softc *sc) 1281 { 1282 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1283 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); 1284 } 1285 1286 static void 1287 iwn_free_kw(struct iwn_softc *sc) 1288 { 1289 iwn_dma_contig_free(&sc->kw_dma); 1290 } 1291 1292 static int 1293 iwn_alloc_ict(struct iwn_softc *sc) 1294 { 1295 /* ICT table must be aligned on a 4KB boundary. */ 1296 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, 1297 IWN_ICT_SIZE, 4096); 1298 } 1299 1300 static void 1301 iwn_free_ict(struct iwn_softc *sc) 1302 { 1303 iwn_dma_contig_free(&sc->ict_dma); 1304 } 1305 1306 static int 1307 iwn_alloc_fwmem(struct iwn_softc *sc) 1308 { 1309 /* Must be aligned on a 16-byte boundary. */ 1310 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); 1311 } 1312 1313 static void 1314 iwn_free_fwmem(struct iwn_softc *sc) 1315 { 1316 iwn_dma_contig_free(&sc->fw_dma); 1317 } 1318 1319 static int 1320 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1321 { 1322 bus_size_t size; 1323 int i, error; 1324 1325 ring->cur = 0; 1326 1327 /* Allocate RX descriptors (256-byte aligned). */ 1328 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1329 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1330 size, 256); 1331 if (error != 0) { 1332 device_printf(sc->sc_dev, 1333 "%s: could not allocate RX ring DMA memory, error %d\n", 1334 __func__, error); 1335 goto fail; 1336 } 1337 1338 /* Allocate RX status area (16-byte aligned). 
*/ 1339 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1340 sizeof (struct iwn_rx_status), 16); 1341 if (error != 0) { 1342 device_printf(sc->sc_dev, 1343 "%s: could not allocate RX status DMA memory, error %d\n", 1344 __func__, error); 1345 goto fail; 1346 } 1347 1348 /* Create RX buffer DMA tag. */ 1349 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1350 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1351 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL, 1352 &ring->data_dmat); 1353 if (error != 0) { 1354 device_printf(sc->sc_dev, 1355 "%s: could not create RX buf DMA tag, error %d\n", 1356 __func__, error); 1357 goto fail; 1358 } 1359 1360 /* 1361 * Allocate and map RX buffers. 1362 */ 1363 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1364 struct iwn_rx_data *data = &ring->data[i]; 1365 bus_addr_t paddr; 1366 1367 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1368 if (error != 0) { 1369 device_printf(sc->sc_dev, 1370 "%s: could not create RX buf DMA map, error %d\n", 1371 __func__, error); 1372 goto fail; 1373 } 1374 1375 data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, 1376 IWN_RBUF_SIZE); 1377 if (data->m == NULL) { 1378 device_printf(sc->sc_dev, 1379 "%s: could not allocate RX mbuf\n", __func__); 1380 error = ENOBUFS; 1381 goto fail; 1382 } 1383 1384 error = bus_dmamap_load(ring->data_dmat, data->map, 1385 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1386 &paddr, BUS_DMA_NOWAIT); 1387 if (error != 0 && error != EFBIG) { 1388 device_printf(sc->sc_dev, 1389 "%s: can't not map mbuf, error %d\n", __func__, 1390 error); 1391 goto fail; 1392 } 1393 1394 /* Set physical address of RX buffer (256-byte aligned). 
*/ 1395 ring->desc[i] = htole32(paddr >> 8); 1396 } 1397 1398 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1399 BUS_DMASYNC_PREWRITE); 1400 1401 return 0; 1402 1403 fail: iwn_free_rx_ring(sc, ring); 1404 return error; 1405 } 1406 1407 static void 1408 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1409 { 1410 int ntries; 1411 1412 if (iwn_nic_lock(sc) == 0) { 1413 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1414 for (ntries = 0; ntries < 1000; ntries++) { 1415 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1416 IWN_FH_RX_STATUS_IDLE) 1417 break; 1418 DELAY(10); 1419 } 1420 iwn_nic_unlock(sc); 1421 } 1422 ring->cur = 0; 1423 sc->last_rx_valid = 0; 1424 } 1425 1426 static void 1427 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1428 { 1429 int i; 1430 1431 iwn_dma_contig_free(&ring->desc_dma); 1432 iwn_dma_contig_free(&ring->stat_dma); 1433 1434 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1435 struct iwn_rx_data *data = &ring->data[i]; 1436 1437 if (data->m != NULL) { 1438 bus_dmamap_sync(ring->data_dmat, data->map, 1439 BUS_DMASYNC_POSTREAD); 1440 bus_dmamap_unload(ring->data_dmat, data->map); 1441 m_freem(data->m); 1442 data->m = NULL; 1443 } 1444 if (data->map != NULL) 1445 bus_dmamap_destroy(ring->data_dmat, data->map); 1446 } 1447 if (ring->data_dmat != NULL) { 1448 bus_dma_tag_destroy(ring->data_dmat); 1449 ring->data_dmat = NULL; 1450 } 1451 } 1452 1453 static int 1454 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1455 { 1456 bus_addr_t paddr; 1457 bus_size_t size; 1458 int i, error; 1459 1460 ring->qid = qid; 1461 ring->queued = 0; 1462 ring->cur = 0; 1463 1464 /* Allocate TX descriptors (256-byte aligned). 
 */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate TX command blocks (4-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
	    size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX cmd DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Tag for frame mbufs: up to IWN_MAX_SCATTER - 1 segments. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create TX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Precompute each slot's command/scratch bus addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create TX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
	}
	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	return error;
}

/* Drop all queued frames and reset the ring's software/descriptor state. */
static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

/* Release all TX ring resources. */
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Reset the Interrupt Cause Table and switch the driver to ICT interrupt
 * delivery (5000 series and up).
 */
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Power the adapter on, detect EEPROM vs. OTPROM, and read the common
 * ROM contents (SKU capabilities, RF config, MAC address) plus the
 * adapter-specific part via ops->read_eeprom.
 */
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct iwn_ops *ops = &sc->ops;
	uint16_t val;
	int error;

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
	/* Read Tx/Rx chains from ROM unless it's known to be broken.
 */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);
	return 0;
}

/*
 * Read 4965-specific ROM data: regulatory domain, channel lists, maximum
 * TX power and the per-band TX power calibration samples.
 */
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 7; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples. */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif
}

#ifdef IWN_DEBUG
/* Dump one TX power calibration group (debug aid). */
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	printf("===band %d===\n", i);
	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	printf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	printf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

/*
 * Read 5000-series-and-up ROM data: regulatory domain, channel lists,
 * enhanced TX power (6000 series) and calibration header.
 */
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < 7; i++) {
		if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
			addr = base + iwn6000_regulatory_bands[i];
		else
			addr = base + iwn5000_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off);
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal));
	}
}

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return nflags;
}

/*
 * Add the 20MHz channels of EEPROM band `n' to the net80211 channel
 * list.  Band 0 is 2GHz (B and G entries both added); others are 5GHz.
 */
static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c;
	uint8_t chan;
	int i, nflags;

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		c = &ic->ic_channels[ic->ic_nchans++];
		c->ic_ieee = chan;
		c->ic_maxregpower = channels[i].maxpwr;
		c->ic_maxpower = 2*c->ic_maxregpower;

		if (n == 0) {	/* 2GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
			/* G =>'s B is supported */
			c->ic_flags = IEEE80211_CHAN_B | nflags;
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags = IEEE80211_CHAN_G | nflags;
		} else {	/* 5GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
			c->ic_flags = IEEE80211_CHAN_A | nflags;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n", chan,
		    channels[i].flags, channels[i].maxpwr);

		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
			/* add HT20, HT40 added separately */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags |= IEEE80211_CHAN_HT20;
		}
	}
}

/*
 * Add the HT40 channel pairs of EEPROM band `n' (n == 5 is 2GHz, 6 is
 * 5GHz) to the net80211 channel list.
 */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c, *cent, *extc;
	uint8_t chan;
	int i, nflags;

	if (!(sc->sc_flags & IWN_FLAG_HAS_11N))
		return;

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		/*
		 * Each entry defines an HT40 channel pair; find the
		 * center channel, then the extension channel above.
		 */
		cent = ieee80211_find_channel_byieee(ic, chan,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (cent == NULL) {	/* XXX shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s: no entry for channel %d\n", __func__, chan);
			continue;
		}
		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (extc == NULL) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: skip chan %d, extension channel not found\n",
			    __func__, chan);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
		    chan, channels[i].flags, channels[i].maxpwr);

		/* Clone center as HT40U and extension as HT40D. */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = cent[0];
		c->ic_extieee = extc->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = extc[0];
		c->ic_extieee = cent->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
	}
}

/*
 * Read EEPROM band `n's channel table and feed it to the 20MHz or HT40
 * parser, keeping the net80211 channel list sorted.
 */
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));

	if (n < 5)
		iwn_read_eeprom_band(sc, n);
	else
		iwn_read_eeprom_ht40(sc, n);
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}

/* Map a net80211 channel back to its EEPROM entry, or NULL if unknown. */
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	int band, chan, i, j;

	if (IEEE80211_IS_CHAN_HT40(c)) {
		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
		/* HT40D entries are keyed by their extension channel. */
		if (IEEE80211_IS_CHAN_HT40D(c))
			chan = c->ic_extieee;
		else
			chan = c->ic_ieee;
		for (i = 0; i < iwn_bands[band].nchan; i++) {
			if (iwn_bands[band].chan[i] == chan)
				return &sc->eeprom_channels[band][i];
		}
	} else {
		for (j = 0; j < 5; j++) {
			for (i = 0; i < iwn_bands[j].nchan; i++) {
				if (iwn_bands[j].chan[i] == c->ic_ieee)
					return &sc->eeprom_channels[j][i];
			}
		}
	}
	return NULL;
}

/*
 * Enforce flags read from EEPROM.
1980 */ 1981 static int 1982 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 1983 int nchan, struct ieee80211_channel chans[]) 1984 { 1985 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1986 int i; 1987 1988 for (i = 0; i < nchan; i++) { 1989 struct ieee80211_channel *c = &chans[i]; 1990 struct iwn_eeprom_chan *channel; 1991 1992 channel = iwn_find_eeprom_channel(sc, c); 1993 if (channel == NULL) { 1994 if_printf(ic->ic_ifp, 1995 "%s: invalid channel %u freq %u/0x%x\n", 1996 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 1997 return EINVAL; 1998 } 1999 c->ic_flags |= iwn_eeprom_channel_flags(channel); 2000 } 2001 2002 return 0; 2003 } 2004 2005 #define nitems(_a) (sizeof((_a)) / sizeof((_a)[0])) 2006 2007 static void 2008 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 2009 { 2010 struct iwn_eeprom_enhinfo enhinfo[35]; 2011 struct ifnet *ifp = sc->sc_ifp; 2012 struct ieee80211com *ic = ifp->if_l2com; 2013 struct ieee80211_channel *c; 2014 uint16_t val, base; 2015 int8_t maxpwr; 2016 uint8_t flags; 2017 int i, j; 2018 2019 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 2020 base = le16toh(val); 2021 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 2022 enhinfo, sizeof enhinfo); 2023 2024 for (i = 0; i < nitems(enhinfo); i++) { 2025 flags = enhinfo[i].flags; 2026 if (!(flags & IWN_ENHINFO_VALID)) 2027 continue; /* Skip invalid entries. 
*/ 2028 2029 maxpwr = 0; 2030 if (sc->txchainmask & IWN_ANT_A) 2031 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 2032 if (sc->txchainmask & IWN_ANT_B) 2033 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 2034 if (sc->txchainmask & IWN_ANT_C) 2035 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 2036 if (sc->ntxchains == 2) 2037 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 2038 else if (sc->ntxchains == 3) 2039 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 2040 2041 for (j = 0; j < ic->ic_nchans; j++) { 2042 c = &ic->ic_channels[j]; 2043 if ((flags & IWN_ENHINFO_5GHZ)) { 2044 if (!IEEE80211_IS_CHAN_A(c)) 2045 continue; 2046 } else if ((flags & IWN_ENHINFO_OFDM)) { 2047 if (!IEEE80211_IS_CHAN_G(c)) 2048 continue; 2049 } else if (!IEEE80211_IS_CHAN_B(c)) 2050 continue; 2051 if ((flags & IWN_ENHINFO_HT40)) { 2052 if (!IEEE80211_IS_CHAN_HT40(c)) 2053 continue; 2054 } else { 2055 if (IEEE80211_IS_CHAN_HT40(c)) 2056 continue; 2057 } 2058 if (enhinfo[i].chan != 0 && 2059 enhinfo[i].chan != c->ic_ieee) 2060 continue; 2061 2062 DPRINTF(sc, IWN_DEBUG_RESET, 2063 "channel %d(%x), maxpwr %d\n", c->ic_ieee, 2064 c->ic_flags, maxpwr / 2); 2065 c->ic_maxregpower = maxpwr / 2; 2066 c->ic_maxpower = maxpwr; 2067 } 2068 } 2069 } 2070 2071 static struct ieee80211_node * 2072 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2073 { 2074 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 2075 } 2076 2077 static __inline int 2078 rate2plcp(int rate) 2079 { 2080 switch (rate & 0xff) { 2081 case 12: return 0xd; 2082 case 18: return 0xf; 2083 case 24: return 0x5; 2084 case 36: return 0x7; 2085 case 48: return 0x9; 2086 case 72: return 0xb; 2087 case 96: return 0x1; 2088 case 108: return 0x3; 2089 case 2: return 10; 2090 case 4: return 20; 2091 case 11: return 55; 2092 case 22: return 110; 2093 } 2094 return 0; 2095 } 2096 2097 static void 2098 iwn_newassoc(struct ieee80211_node *ni, int isnew) 2099 { 2100 #define RV(v) ((v) & IEEE80211_RATE_VAL) 2101 struct 
ieee80211com *ic = ni->ni_ic; 2102 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2103 struct iwn_node *wn = (void *)ni; 2104 uint8_t txant1, txant2; 2105 int i, plcp, rate, ridx; 2106 2107 /* Use the first valid TX antenna. */ 2108 txant1 = IWN_LSB(sc->txchainmask); 2109 txant2 = IWN_LSB(sc->txchainmask & ~txant1); 2110 2111 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 2112 ridx = ni->ni_rates.rs_nrates - 1; 2113 for (i = ni->ni_htrates.rs_nrates - 1; i >= 0; i--) { 2114 plcp = RV(ni->ni_htrates.rs_rates[i]) | IWN_RFLAG_MCS; 2115 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 2116 plcp |= IWN_RFLAG_HT40; 2117 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) 2118 plcp |= IWN_RFLAG_SGI; 2119 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) 2120 plcp |= IWN_RFLAG_SGI; 2121 if (RV(ni->ni_htrates.rs_rates[i]) > 7) 2122 plcp |= IWN_RFLAG_ANT(txant1 | txant2); 2123 else 2124 plcp |= IWN_RFLAG_ANT(txant1); 2125 if (ridx >= 0) { 2126 rate = RV(ni->ni_rates.rs_rates[ridx]); 2127 wn->ridx[rate] = plcp; 2128 } 2129 wn->ridx[IEEE80211_RATE_MCS | i] = plcp; 2130 ridx--; 2131 } 2132 } else { 2133 for (i = 0; i < ni->ni_rates.rs_nrates; i++) { 2134 rate = RV(ni->ni_rates.rs_rates[i]); 2135 plcp = rate2plcp(rate); 2136 ridx = ic->ic_rt->rateCodeToIndex[rate]; 2137 if (ridx < IWN_RIDX_OFDM6 && 2138 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 2139 plcp |= IWN_RFLAG_CCK; 2140 plcp |= IWN_RFLAG_ANT(txant1); 2141 wn->ridx[rate] = htole32(plcp); 2142 } 2143 } 2144 #undef RV 2145 } 2146 2147 static int 2148 iwn_media_change(struct ifnet *ifp) 2149 { 2150 int error; 2151 2152 error = ieee80211_media_change(ifp); 2153 /* NB: only the fixed rate can change and that doesn't need a reset */ 2154 return (error == ENETRESET ? 
0 : error); 2155 } 2156 2157 static int 2158 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 2159 { 2160 struct iwn_vap *ivp = IWN_VAP(vap); 2161 struct ieee80211com *ic = vap->iv_ic; 2162 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2163 int error = 0; 2164 2165 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 2166 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); 2167 2168 IEEE80211_UNLOCK(ic); 2169 IWN_LOCK(sc); 2170 callout_stop(&sc->calib_to); 2171 2172 switch (nstate) { 2173 case IEEE80211_S_ASSOC: 2174 if (vap->iv_state != IEEE80211_S_RUN) 2175 break; 2176 /* FALLTHROUGH */ 2177 case IEEE80211_S_AUTH: 2178 if (vap->iv_state == IEEE80211_S_AUTH) 2179 break; 2180 2181 /* 2182 * !AUTH -> AUTH transition requires state reset to handle 2183 * reassociations correctly. 2184 */ 2185 sc->rxon.associd = 0; 2186 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 2187 sc->calib.state = IWN_CALIB_STATE_INIT; 2188 2189 if ((error = iwn_auth(sc, vap)) != 0) { 2190 device_printf(sc->sc_dev, 2191 "%s: could not move to auth state\n", __func__); 2192 } 2193 break; 2194 2195 case IEEE80211_S_RUN: 2196 /* 2197 * RUN -> RUN transition; Just restart the timers. 2198 */ 2199 if (vap->iv_state == IEEE80211_S_RUN) { 2200 sc->calib_cnt = 0; 2201 break; 2202 } 2203 2204 /* 2205 * !RUN -> RUN requires setting the association id 2206 * which is done with a firmware cmd. We also defer 2207 * starting the timers until that work is done. 
2208 */ 2209 if ((error = iwn_run(sc, vap)) != 0) { 2210 device_printf(sc->sc_dev, 2211 "%s: could not move to run state\n", __func__); 2212 } 2213 break; 2214 2215 case IEEE80211_S_INIT: 2216 sc->calib.state = IWN_CALIB_STATE_INIT; 2217 break; 2218 2219 default: 2220 break; 2221 } 2222 IWN_UNLOCK(sc); 2223 IEEE80211_LOCK(ic); 2224 if (error != 0) 2225 return error; 2226 return ivp->iv_newstate(vap, nstate, arg); 2227 } 2228 2229 static void 2230 iwn_calib_timeout(void *arg) 2231 { 2232 struct iwn_softc *sc = arg; 2233 2234 IWN_LOCK_ASSERT(sc); 2235 2236 /* Force automatic TX power calibration every 60 secs. */ 2237 if (++sc->calib_cnt >= 120) { 2238 uint32_t flags = 0; 2239 2240 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2241 "sending request for statistics"); 2242 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2243 sizeof flags, 1); 2244 sc->calib_cnt = 0; 2245 } 2246 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 2247 sc); 2248 } 2249 2250 /* 2251 * Process an RX_PHY firmware notification. This is usually immediately 2252 * followed by an MPDU_RX_DONE notification. 2253 */ 2254 static void 2255 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2256 struct iwn_rx_data *data) 2257 { 2258 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2259 2260 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2261 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2262 2263 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2264 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2265 sc->last_rx_valid = 1; 2266 } 2267 2268 /* 2269 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2270 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 
2271 */ 2272 static void 2273 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2274 struct iwn_rx_data *data) 2275 { 2276 struct iwn_ops *ops = &sc->ops; 2277 struct ifnet *ifp = sc->sc_ifp; 2278 struct ieee80211com *ic = ifp->if_l2com; 2279 struct iwn_rx_ring *ring = &sc->rxq; 2280 struct ieee80211_frame *wh; 2281 struct ieee80211_node *ni; 2282 struct mbuf *m, *m1; 2283 struct iwn_rx_stat *stat; 2284 caddr_t head; 2285 bus_addr_t paddr; 2286 uint32_t flags; 2287 int error, len, rssi, nf; 2288 2289 if (desc->type == IWN_MPDU_RX_DONE) { 2290 /* Check for prior RX_PHY notification. */ 2291 if (!sc->last_rx_valid) { 2292 DPRINTF(sc, IWN_DEBUG_ANY, 2293 "%s: missing RX_PHY\n", __func__); 2294 return; 2295 } 2296 stat = &sc->last_rx_stat; 2297 } else 2298 stat = (struct iwn_rx_stat *)(desc + 1); 2299 2300 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2301 2302 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2303 device_printf(sc->sc_dev, 2304 "%s: invalid RX statistic header, len %d\n", __func__, 2305 stat->cfg_phy_len); 2306 return; 2307 } 2308 if (desc->type == IWN_MPDU_RX_DONE) { 2309 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2310 head = (caddr_t)(mpdu + 1); 2311 len = le16toh(mpdu->len); 2312 } else { 2313 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2314 len = le16toh(stat->len); 2315 } 2316 2317 flags = le32toh(*(uint32_t *)(head + len)); 2318 2319 /* Discard frames with a bad FCS early. */ 2320 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2321 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", 2322 __func__, flags); 2323 ifp->if_ierrors++; 2324 return; 2325 } 2326 /* Discard frames that are too short. 
*/ 2327 if (len < sizeof (*wh)) { 2328 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2329 __func__, len); 2330 ifp->if_ierrors++; 2331 return; 2332 } 2333 2334 m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 2335 if (m1 == NULL) { 2336 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2337 __func__); 2338 ifp->if_ierrors++; 2339 return; 2340 } 2341 bus_dmamap_unload(ring->data_dmat, data->map); 2342 2343 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 2344 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2345 if (error != 0 && error != EFBIG) { 2346 device_printf(sc->sc_dev, 2347 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2348 m_freem(m1); 2349 2350 /* Try to reload the old mbuf. */ 2351 error = bus_dmamap_load(ring->data_dmat, data->map, 2352 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 2353 &paddr, BUS_DMA_NOWAIT); 2354 if (error != 0 && error != EFBIG) { 2355 panic("%s: could not load old RX mbuf", __func__); 2356 } 2357 /* Physical address may have changed. */ 2358 ring->desc[ring->cur] = htole32(paddr >> 8); 2359 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2360 BUS_DMASYNC_PREWRITE); 2361 ifp->if_ierrors++; 2362 return; 2363 } 2364 2365 m = data->m; 2366 data->m = m1; 2367 /* Update RX descriptor. */ 2368 ring->desc[ring->cur] = htole32(paddr >> 8); 2369 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2370 BUS_DMASYNC_PREWRITE); 2371 2372 /* Finalize mbuf. */ 2373 m->m_pkthdr.rcvif = ifp; 2374 m->m_data = head; 2375 m->m_pkthdr.len = m->m_len = len; 2376 2377 /* Grab a reference to the source node. */ 2378 wh = mtod(m, struct ieee80211_frame *); 2379 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2380 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2381 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; 2382 2383 rssi = ops->get_rssi(sc, stat); 2384 2385 if (ieee80211_radiotap_active(ic)) { 2386 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2387 2388 tap->wr_flags = 0; 2389 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2390 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2391 tap->wr_dbm_antsignal = (int8_t)rssi; 2392 tap->wr_dbm_antnoise = (int8_t)nf; 2393 tap->wr_tsft = stat->tstamp; 2394 switch (stat->rate) { 2395 /* CCK rates. */ 2396 case 10: tap->wr_rate = 2; break; 2397 case 20: tap->wr_rate = 4; break; 2398 case 55: tap->wr_rate = 11; break; 2399 case 110: tap->wr_rate = 22; break; 2400 /* OFDM rates. */ 2401 case 0xd: tap->wr_rate = 12; break; 2402 case 0xf: tap->wr_rate = 18; break; 2403 case 0x5: tap->wr_rate = 24; break; 2404 case 0x7: tap->wr_rate = 36; break; 2405 case 0x9: tap->wr_rate = 48; break; 2406 case 0xb: tap->wr_rate = 72; break; 2407 case 0x1: tap->wr_rate = 96; break; 2408 case 0x3: tap->wr_rate = 108; break; 2409 /* Unknown rate: should not happen. */ 2410 default: tap->wr_rate = 0; 2411 } 2412 } 2413 2414 IWN_UNLOCK(sc); 2415 2416 /* Send the frame to the 802.11 layer. */ 2417 if (ni != NULL) { 2418 if (ni->ni_flags & IEEE80211_NODE_HT) 2419 m->m_flags |= M_AMPDU; 2420 (void)ieee80211_input(ni, m, rssi - nf, nf); 2421 /* Node is no longer needed. */ 2422 ieee80211_free_node(ni); 2423 } else 2424 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 2425 2426 IWN_LOCK(sc); 2427 } 2428 2429 /* Process an incoming Compressed BlockAck. 
*/ 2430 static void 2431 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2432 struct iwn_rx_data *data) 2433 { 2434 struct ifnet *ifp = sc->sc_ifp; 2435 struct iwn_node *wn; 2436 struct ieee80211_node *ni; 2437 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2438 struct iwn_tx_ring *txq; 2439 struct ieee80211_tx_ampdu *tap; 2440 uint64_t bitmap; 2441 uint8_t tid; 2442 int ackfailcnt = 0, i, shift; 2443 2444 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2445 2446 txq = &sc->txq[le16toh(ba->qid)]; 2447 tap = sc->qid2tap[le16toh(ba->qid)]; 2448 tid = WME_AC_TO_TID(tap->txa_ac); 2449 ni = tap->txa_ni; 2450 wn = (void *)ni; 2451 2452 if (wn->agg[tid].bitmap == 0) 2453 return; 2454 2455 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 2456 if (shift < 0) 2457 shift += 0x100; 2458 2459 if (wn->agg[tid].nframes > (64 - shift)) 2460 return; 2461 2462 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; 2463 for (i = 0; bitmap; i++) { 2464 if ((bitmap & 1) == 0) { 2465 ifp->if_oerrors++; 2466 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 2467 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2468 } else { 2469 ifp->if_opackets++; 2470 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 2471 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2472 } 2473 bitmap >>= 1; 2474 } 2475 } 2476 2477 /* 2478 * Process a CALIBRATION_RESULT notification sent by the initialization 2479 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2480 */ 2481 static void 2482 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2483 struct iwn_rx_data *data) 2484 { 2485 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2486 int len, idx = -1; 2487 2488 /* Runtime firmware should not send such a notification. 
*/ 2489 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2490 return; 2491 2492 len = (le32toh(desc->len) & 0x3fff) - 4; 2493 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2494 2495 switch (calib->code) { 2496 case IWN5000_PHY_CALIB_DC: 2497 if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 && 2498 (sc->hw_type == IWN_HW_REV_TYPE_5150 || 2499 sc->hw_type >= IWN_HW_REV_TYPE_6000) && 2500 sc->hw_type != IWN_HW_REV_TYPE_6050) 2501 idx = 0; 2502 break; 2503 case IWN5000_PHY_CALIB_LO: 2504 idx = 1; 2505 break; 2506 case IWN5000_PHY_CALIB_TX_IQ: 2507 idx = 2; 2508 break; 2509 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2510 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2511 sc->hw_type != IWN_HW_REV_TYPE_5150) 2512 idx = 3; 2513 break; 2514 case IWN5000_PHY_CALIB_BASE_BAND: 2515 idx = 4; 2516 break; 2517 } 2518 if (idx == -1) /* Ignore other results. */ 2519 return; 2520 2521 /* Save calibration result. */ 2522 if (sc->calibcmd[idx].buf != NULL) 2523 free(sc->calibcmd[idx].buf, M_DEVBUF); 2524 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2525 if (sc->calibcmd[idx].buf == NULL) { 2526 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 2527 "not enough memory for calibration result %d\n", 2528 calib->code); 2529 return; 2530 } 2531 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 2532 "saving calibration result code=%d len=%d\n", calib->code, len); 2533 sc->calibcmd[idx].len = len; 2534 memcpy(sc->calibcmd[idx].buf, calib, len); 2535 } 2536 2537 /* 2538 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2539 * The latter is sent by the firmware after each received beacon. 
2540 */ 2541 static void 2542 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2543 struct iwn_rx_data *data) 2544 { 2545 struct iwn_ops *ops = &sc->ops; 2546 struct ifnet *ifp = sc->sc_ifp; 2547 struct ieee80211com *ic = ifp->if_l2com; 2548 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2549 struct iwn_calib_state *calib = &sc->calib; 2550 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2551 int temp; 2552 2553 /* Ignore statistics received during a scan. */ 2554 if (vap->iv_state != IEEE80211_S_RUN || 2555 (ic->ic_flags & IEEE80211_F_SCAN)) 2556 return; 2557 2558 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2559 2560 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n", 2561 __func__, desc->type); 2562 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2563 2564 /* Test if temperature has changed. */ 2565 if (stats->general.temp != sc->rawtemp) { 2566 /* Convert "raw" temperature to degC. */ 2567 sc->rawtemp = stats->general.temp; 2568 temp = ops->get_temperature(sc); 2569 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 2570 __func__, temp); 2571 2572 /* Update TX power if need be (4965AGN only). */ 2573 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2574 iwn4965_power_calibration(sc, temp); 2575 } 2576 2577 if (desc->type != IWN_BEACON_STATISTICS) 2578 return; /* Reply to a statistics request. */ 2579 2580 sc->noise = iwn_get_noise(&stats->rx.general); 2581 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 2582 2583 /* Test that RSSI and noise are present in stats report. 
*/ 2584 if (le32toh(stats->rx.general.flags) != 1) { 2585 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 2586 "received statistics without RSSI"); 2587 return; 2588 } 2589 2590 if (calib->state == IWN_CALIB_STATE_ASSOC) 2591 iwn_collect_noise(sc, &stats->rx.general); 2592 else if (calib->state == IWN_CALIB_STATE_RUN) 2593 iwn_tune_sensitivity(sc, &stats->rx); 2594 } 2595 2596 /* 2597 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2598 * and 5000 adapters have different incompatible TX status formats. 2599 */ 2600 static void 2601 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2602 struct iwn_rx_data *data) 2603 { 2604 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2605 struct iwn_tx_ring *ring; 2606 int qid; 2607 2608 qid = desc->qid & 0xf; 2609 ring = &sc->txq[qid]; 2610 2611 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2612 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2613 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2614 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2615 le32toh(stat->status)); 2616 2617 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2618 if (qid >= sc->firstaggqueue) { 2619 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 2620 &stat->status); 2621 } else { 2622 iwn_tx_done(sc, desc, stat->ackfailcnt, 2623 le32toh(stat->status) & 0xff); 2624 } 2625 } 2626 2627 static void 2628 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2629 struct iwn_rx_data *data) 2630 { 2631 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2632 struct iwn_tx_ring *ring; 2633 int qid; 2634 2635 qid = desc->qid & 0xf; 2636 ring = &sc->txq[qid]; 2637 2638 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2639 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2640 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2641 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2642 le32toh(stat->status)); 2643 2644 #ifdef notyet 2645 /* Reset 
TX scheduler slot. */ 2646 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2647 #endif 2648 2649 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2650 if (qid >= sc->firstaggqueue) { 2651 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 2652 &stat->status); 2653 } else { 2654 iwn_tx_done(sc, desc, stat->ackfailcnt, 2655 le16toh(stat->status) & 0xff); 2656 } 2657 } 2658 2659 /* 2660 * Adapter-independent backend for TX_DONE firmware notifications. 2661 */ 2662 static void 2663 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2664 uint8_t status) 2665 { 2666 struct ifnet *ifp = sc->sc_ifp; 2667 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2668 struct iwn_tx_data *data = &ring->data[desc->idx]; 2669 struct mbuf *m; 2670 struct ieee80211_node *ni; 2671 struct ieee80211vap *vap; 2672 2673 KASSERT(data->ni != NULL, ("no node")); 2674 2675 /* Unmap and free mbuf. */ 2676 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2677 bus_dmamap_unload(ring->data_dmat, data->map); 2678 m = data->m, data->m = NULL; 2679 ni = data->ni, data->ni = NULL; 2680 vap = ni->ni_vap; 2681 2682 if (m->m_flags & M_TXCB) { 2683 /* 2684 * Channels marked for "radar" require traffic to be received 2685 * to unlock before we can transmit. Until traffic is seen 2686 * any attempt to transmit is returned immediately with status 2687 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 2688 * happen on first authenticate after scanning. To workaround 2689 * this we ignore a failure of this sort in AUTH state so the 2690 * 802.11 layer will fall back to using a timeout to wait for 2691 * the AUTH reply. This allows the firmware time to see 2692 * traffic so a subsequent retry of AUTH succeeds. It's 2693 * unclear why the firmware does not maintain state for 2694 * channels recently visited as this would allow immediate 2695 * use of the channel after a scan (where we see traffic). 
2696 */ 2697 if (status == IWN_TX_FAIL_TX_LOCKED && 2698 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 2699 ieee80211_process_callback(ni, m, 0); 2700 else 2701 ieee80211_process_callback(ni, m, 2702 (status & IWN_TX_FAIL) != 0); 2703 } 2704 2705 /* 2706 * Update rate control statistics for the node. 2707 */ 2708 if (status & IWN_TX_FAIL) { 2709 ifp->if_oerrors++; 2710 ieee80211_ratectl_tx_complete(vap, ni, 2711 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2712 } else { 2713 ifp->if_opackets++; 2714 ieee80211_ratectl_tx_complete(vap, ni, 2715 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2716 } 2717 m_freem(m); 2718 ieee80211_free_node(ni); 2719 2720 sc->sc_tx_timer = 0; 2721 if (--ring->queued < IWN_TX_RING_LOMARK) { 2722 sc->qfullmsk &= ~(1 << ring->qid); 2723 if (sc->qfullmsk == 0 && 2724 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2725 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2726 iwn_start_locked(ifp); 2727 } 2728 } 2729 } 2730 2731 /* 2732 * Process a "command done" firmware notification. This is where we wakeup 2733 * processes waiting for a synchronous command completion. 2734 */ 2735 static void 2736 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2737 { 2738 struct iwn_tx_ring *ring = &sc->txq[4]; 2739 struct iwn_tx_data *data; 2740 2741 if ((desc->qid & 0xf) != 4) 2742 return; /* Not a command ack. */ 2743 2744 data = &ring->data[desc->idx]; 2745 2746 /* If the command was mapped in an mbuf, free it. 
*/ 2747 if (data->m != NULL) { 2748 bus_dmamap_sync(ring->data_dmat, data->map, 2749 BUS_DMASYNC_POSTWRITE); 2750 bus_dmamap_unload(ring->data_dmat, data->map); 2751 m_freem(data->m); 2752 data->m = NULL; 2753 } 2754 wakeup(&ring->desc[desc->idx]); 2755 } 2756 2757 static void 2758 iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes, 2759 void *stat) 2760 { 2761 struct ifnet *ifp = sc->sc_ifp; 2762 struct iwn_tx_ring *ring = &sc->txq[qid]; 2763 struct iwn_tx_data *data; 2764 struct mbuf *m; 2765 struct iwn_node *wn; 2766 struct ieee80211_node *ni; 2767 struct ieee80211vap *vap; 2768 struct ieee80211_tx_ampdu *tap; 2769 uint64_t bitmap; 2770 uint32_t *status = stat; 2771 uint16_t *aggstatus = stat; 2772 uint8_t tid; 2773 int bit, i, lastidx, seqno, shift, start; 2774 2775 #ifdef NOT_YET 2776 if (nframes == 1) { 2777 if ((*status & 0xff) != 1 && (*status & 0xff) != 2) 2778 printf("ieee80211_send_bar()\n"); 2779 } 2780 #endif 2781 2782 bitmap = 0; 2783 start = idx; 2784 for (i = 0; i < nframes; i++) { 2785 if (le16toh(aggstatus[i * 2]) & 0xc) 2786 continue; 2787 2788 idx = le16toh(aggstatus[2*i + 1]) & 0xff; 2789 bit = idx - start; 2790 shift = 0; 2791 if (bit >= 64) { 2792 shift = 0x100 - idx + start; 2793 bit = 0; 2794 start = idx; 2795 } else if (bit <= -64) 2796 bit = 0x100 - start + idx; 2797 else if (bit < 0) { 2798 shift = start - idx; 2799 start = idx; 2800 bit = 0; 2801 } 2802 bitmap = bitmap << shift; 2803 bitmap |= 1ULL << bit; 2804 } 2805 tap = sc->qid2tap[qid]; 2806 if (tap != NULL) { 2807 tid = WME_AC_TO_TID(tap->txa_ac); 2808 wn = (void *)tap->txa_ni; 2809 wn->agg[tid].bitmap = bitmap; 2810 wn->agg[tid].startidx = start; 2811 wn->agg[tid].nframes = nframes; 2812 } 2813 2814 seqno = le32toh(*(status + nframes)) & 0xfff; 2815 for (lastidx = (seqno & 0xff); ring->read != lastidx;) { 2816 data = &ring->data[ring->read]; 2817 2818 KASSERT(data->ni != NULL, ("no node")); 2819 2820 /* Unmap and free mbuf. 
*/ 2821 bus_dmamap_sync(ring->data_dmat, data->map, 2822 BUS_DMASYNC_POSTWRITE); 2823 bus_dmamap_unload(ring->data_dmat, data->map); 2824 m = data->m, data->m = NULL; 2825 ni = data->ni, data->ni = NULL; 2826 vap = ni->ni_vap; 2827 2828 if (m->m_flags & M_TXCB) 2829 ieee80211_process_callback(ni, m, 1); 2830 2831 m_freem(m); 2832 ieee80211_free_node(ni); 2833 2834 ring->queued--; 2835 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT; 2836 } 2837 2838 sc->sc_tx_timer = 0; 2839 if (ring->queued < IWN_TX_RING_LOMARK) { 2840 sc->qfullmsk &= ~(1 << ring->qid); 2841 if (sc->qfullmsk == 0 && 2842 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2843 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2844 iwn_start_locked(ifp); 2845 } 2846 } 2847 } 2848 2849 /* 2850 * Process an INT_FH_RX or INT_SW_RX interrupt. 2851 */ 2852 static void 2853 iwn_notif_intr(struct iwn_softc *sc) 2854 { 2855 struct iwn_ops *ops = &sc->ops; 2856 struct ifnet *ifp = sc->sc_ifp; 2857 struct ieee80211com *ic = ifp->if_l2com; 2858 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2859 uint16_t hw; 2860 2861 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 2862 BUS_DMASYNC_POSTREAD); 2863 2864 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 2865 while (sc->rxq.cur != hw) { 2866 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2867 struct iwn_rx_desc *desc; 2868 2869 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2870 BUS_DMASYNC_POSTREAD); 2871 desc = mtod(data->m, struct iwn_rx_desc *); 2872 2873 DPRINTF(sc, IWN_DEBUG_RECV, 2874 "%s: qid %x idx %d flags %x type %d(%s) len %d\n", 2875 __func__, desc->qid & 0xf, desc->idx, desc->flags, 2876 desc->type, iwn_intr_str(desc->type), 2877 le16toh(desc->len)); 2878 2879 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2880 iwn_cmd_done(sc, desc); 2881 2882 switch (desc->type) { 2883 case IWN_RX_PHY: 2884 iwn_rx_phy(sc, desc, data); 2885 break; 2886 2887 case IWN_RX_DONE: /* 4965AGN only. 
*/ 2888 case IWN_MPDU_RX_DONE: 2889 /* An 802.11 frame has been received. */ 2890 iwn_rx_done(sc, desc, data); 2891 break; 2892 2893 case IWN_RX_COMPRESSED_BA: 2894 /* A Compressed BlockAck has been received. */ 2895 iwn_rx_compressed_ba(sc, desc, data); 2896 break; 2897 2898 case IWN_TX_DONE: 2899 /* An 802.11 frame has been transmitted. */ 2900 ops->tx_done(sc, desc, data); 2901 break; 2902 2903 case IWN_RX_STATISTICS: 2904 case IWN_BEACON_STATISTICS: 2905 iwn_rx_statistics(sc, desc, data); 2906 break; 2907 2908 case IWN_BEACON_MISSED: 2909 { 2910 struct iwn_beacon_missed *miss = 2911 (struct iwn_beacon_missed *)(desc + 1); 2912 int misses; 2913 2914 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2915 BUS_DMASYNC_POSTREAD); 2916 misses = le32toh(miss->consecutive); 2917 2918 DPRINTF(sc, IWN_DEBUG_STATE, 2919 "%s: beacons missed %d/%d\n", __func__, 2920 misses, le32toh(miss->total)); 2921 /* 2922 * If more than 5 consecutive beacons are missed, 2923 * reinitialize the sensitivity state machine. 2924 */ 2925 if (vap->iv_state == IEEE80211_S_RUN && 2926 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 2927 if (misses > 5) 2928 (void)iwn_init_sensitivity(sc); 2929 if (misses >= vap->iv_bmissthreshold) { 2930 IWN_UNLOCK(sc); 2931 ieee80211_beacon_miss(ic); 2932 IWN_LOCK(sc); 2933 } 2934 } 2935 break; 2936 } 2937 case IWN_UC_READY: 2938 { 2939 struct iwn_ucode_info *uc = 2940 (struct iwn_ucode_info *)(desc + 1); 2941 2942 /* The microcontroller is ready. */ 2943 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2944 BUS_DMASYNC_POSTREAD); 2945 DPRINTF(sc, IWN_DEBUG_RESET, 2946 "microcode alive notification version=%d.%d " 2947 "subtype=%x alive=%x\n", uc->major, uc->minor, 2948 uc->subtype, le32toh(uc->valid)); 2949 2950 if (le32toh(uc->valid) != 1) { 2951 device_printf(sc->sc_dev, 2952 "microcontroller initialization failed"); 2953 break; 2954 } 2955 if (uc->subtype == IWN_UCODE_INIT) { 2956 /* Save microcontroller report. 
*/ 2957 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2958 } 2959 /* Save the address of the error log in SRAM. */ 2960 sc->errptr = le32toh(uc->errptr); 2961 break; 2962 } 2963 case IWN_STATE_CHANGED: 2964 { 2965 uint32_t *status = (uint32_t *)(desc + 1); 2966 2967 /* 2968 * State change allows hardware switch change to be 2969 * noted. However, we handle this in iwn_intr as we 2970 * get both the enable/disble intr. 2971 */ 2972 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2973 BUS_DMASYNC_POSTREAD); 2974 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n", 2975 le32toh(*status)); 2976 break; 2977 } 2978 case IWN_START_SCAN: 2979 { 2980 struct iwn_start_scan *scan = 2981 (struct iwn_start_scan *)(desc + 1); 2982 2983 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2984 BUS_DMASYNC_POSTREAD); 2985 DPRINTF(sc, IWN_DEBUG_ANY, 2986 "%s: scanning channel %d status %x\n", 2987 __func__, scan->chan, le32toh(scan->status)); 2988 break; 2989 } 2990 case IWN_STOP_SCAN: 2991 { 2992 struct iwn_stop_scan *scan = 2993 (struct iwn_stop_scan *)(desc + 1); 2994 2995 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 2996 BUS_DMASYNC_POSTREAD); 2997 DPRINTF(sc, IWN_DEBUG_STATE, 2998 "scan finished nchan=%d status=%d chan=%d\n", 2999 scan->nchan, scan->status, scan->chan); 3000 3001 IWN_UNLOCK(sc); 3002 ieee80211_scan_next(vap); 3003 IWN_LOCK(sc); 3004 break; 3005 } 3006 case IWN5000_CALIBRATION_RESULT: 3007 iwn5000_rx_calib_results(sc, desc, data); 3008 break; 3009 3010 case IWN5000_CALIBRATION_DONE: 3011 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 3012 wakeup(sc); 3013 break; 3014 } 3015 3016 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 3017 } 3018 3019 /* Tell the firmware what we have processed. */ 3020 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 3021 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 3022 } 3023 3024 /* 3025 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 3026 * from power-down sleep mode. 
3027 */ 3028 static void 3029 iwn_wakeup_intr(struct iwn_softc *sc) 3030 { 3031 int qid; 3032 3033 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 3034 __func__); 3035 3036 /* Wakeup RX and TX rings. */ 3037 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 3038 for (qid = 0; qid < sc->ntxqs; qid++) { 3039 struct iwn_tx_ring *ring = &sc->txq[qid]; 3040 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 3041 } 3042 } 3043 3044 static void 3045 iwn_rftoggle_intr(struct iwn_softc *sc) 3046 { 3047 struct ifnet *ifp = sc->sc_ifp; 3048 struct ieee80211com *ic = ifp->if_l2com; 3049 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 3050 3051 IWN_LOCK_ASSERT(sc); 3052 3053 device_printf(sc->sc_dev, "RF switch: radio %s\n", 3054 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 3055 if (tmp & IWN_GP_CNTRL_RFKILL) 3056 ieee80211_runtask(ic, &sc->sc_radioon_task); 3057 else 3058 ieee80211_runtask(ic, &sc->sc_radiooff_task); 3059 } 3060 3061 /* 3062 * Dump the error log of the firmware when a firmware panic occurs. Although 3063 * we can't debug the firmware because it is neither open source nor free, it 3064 * can help us to identify certain classes of problems. 3065 */ 3066 static void 3067 iwn_fatal_intr(struct iwn_softc *sc) 3068 { 3069 struct iwn_fw_dump dump; 3070 int i; 3071 3072 IWN_LOCK_ASSERT(sc); 3073 3074 /* Force a complete recalibration on next init. */ 3075 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 3076 3077 /* Check that the error log address is valid. */ 3078 if (sc->errptr < IWN_FW_DATA_BASE || 3079 sc->errptr + sizeof (dump) > 3080 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 3081 printf("%s: bad firmware error log address 0x%08x\n", __func__, 3082 sc->errptr); 3083 return; 3084 } 3085 if (iwn_nic_lock(sc) != 0) { 3086 printf("%s: could not read firmware error log\n", __func__); 3087 return; 3088 } 3089 /* Read firmware error log from SRAM. 
*/ 3090 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 3091 sizeof (dump) / sizeof (uint32_t)); 3092 iwn_nic_unlock(sc); 3093 3094 if (dump.valid == 0) { 3095 printf("%s: firmware error log is empty\n", __func__); 3096 return; 3097 } 3098 printf("firmware error log:\n"); 3099 printf(" error type = \"%s\" (0x%08X)\n", 3100 (dump.id < nitems(iwn_fw_errmsg)) ? 3101 iwn_fw_errmsg[dump.id] : "UNKNOWN", 3102 dump.id); 3103 printf(" program counter = 0x%08X\n", dump.pc); 3104 printf(" source line = 0x%08X\n", dump.src_line); 3105 printf(" error data = 0x%08X%08X\n", 3106 dump.error_data[0], dump.error_data[1]); 3107 printf(" branch link = 0x%08X%08X\n", 3108 dump.branch_link[0], dump.branch_link[1]); 3109 printf(" interrupt link = 0x%08X%08X\n", 3110 dump.interrupt_link[0], dump.interrupt_link[1]); 3111 printf(" time = %u\n", dump.time[0]); 3112 3113 /* Dump driver status (TX and RX rings) while we're here. */ 3114 printf("driver status:\n"); 3115 for (i = 0; i < sc->ntxqs; i++) { 3116 struct iwn_tx_ring *ring = &sc->txq[i]; 3117 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 3118 i, ring->qid, ring->cur, ring->queued); 3119 } 3120 printf(" rx ring: cur=%d\n", sc->rxq.cur); 3121 } 3122 3123 static void 3124 iwn_intr(void *arg) 3125 { 3126 struct iwn_softc *sc = arg; 3127 struct ifnet *ifp = sc->sc_ifp; 3128 uint32_t r1, r2, tmp; 3129 3130 IWN_LOCK(sc); 3131 3132 /* Disable interrupts. */ 3133 IWN_WRITE(sc, IWN_INT_MASK, 0); 3134 3135 /* Read interrupts from ICT (fast) or from registers (slow). */ 3136 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3137 tmp = 0; 3138 while (sc->ict[sc->ict_cur] != 0) { 3139 tmp |= sc->ict[sc->ict_cur]; 3140 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 3141 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 3142 } 3143 tmp = le32toh(tmp); 3144 if (tmp == 0xffffffff) /* Shouldn't happen. */ 3145 tmp = 0; 3146 else if (tmp & 0xc0000) /* Workaround a HW bug. 
*/ 3147 tmp |= 0x8000; 3148 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3149 r2 = 0; /* Unused. */ 3150 } else { 3151 r1 = IWN_READ(sc, IWN_INT); 3152 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3153 return; /* Hardware gone! */ 3154 r2 = IWN_READ(sc, IWN_FH_INT); 3155 } 3156 3157 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2); 3158 3159 if (r1 == 0 && r2 == 0) 3160 goto done; /* Interrupt not for us. */ 3161 3162 /* Acknowledge interrupts. */ 3163 IWN_WRITE(sc, IWN_INT, r1); 3164 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3165 IWN_WRITE(sc, IWN_FH_INT, r2); 3166 3167 if (r1 & IWN_INT_RF_TOGGLED) { 3168 iwn_rftoggle_intr(sc); 3169 goto done; 3170 } 3171 if (r1 & IWN_INT_CT_REACHED) { 3172 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 3173 __func__); 3174 } 3175 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3176 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 3177 __func__); 3178 /* Dump firmware error log and stop. */ 3179 iwn_fatal_intr(sc); 3180 ifp->if_flags &= ~IFF_UP; 3181 iwn_stop_locked(sc); 3182 goto done; 3183 } 3184 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3185 (r2 & IWN_FH_INT_RX)) { 3186 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3187 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3188 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3189 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3190 IWN_INT_PERIODIC_DIS); 3191 iwn_notif_intr(sc); 3192 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 3193 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3194 IWN_INT_PERIODIC_ENA); 3195 } 3196 } else 3197 iwn_notif_intr(sc); 3198 } 3199 3200 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 3201 if (sc->sc_flags & IWN_FLAG_USE_ICT) 3202 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 3203 wakeup(sc); /* FH DMA transfer completed. */ 3204 } 3205 3206 if (r1 & IWN_INT_ALIVE) 3207 wakeup(sc); /* Firmware is alive. */ 3208 3209 if (r1 & IWN_INT_WAKEUP) 3210 iwn_wakeup_intr(sc); 3211 3212 done: 3213 /* Re-enable interrupts. 
*/ 3214 if (ifp->if_flags & IFF_UP) 3215 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 3216 3217 IWN_UNLOCK(sc); 3218 } 3219 3220 /* 3221 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 3222 * 5000 adapters use a slightly different format). 3223 */ 3224 static void 3225 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3226 uint16_t len) 3227 { 3228 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3229 3230 *w = htole16(len + 8); 3231 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3232 BUS_DMASYNC_PREWRITE); 3233 if (idx < IWN_SCHED_WINSZ) { 3234 *(w + IWN_TX_RING_COUNT) = *w; 3235 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3236 BUS_DMASYNC_PREWRITE); 3237 } 3238 } 3239 3240 static void 3241 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3242 uint16_t len) 3243 { 3244 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3245 3246 *w = htole16(id << 12 | (len + 8)); 3247 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3248 BUS_DMASYNC_PREWRITE); 3249 if (idx < IWN_SCHED_WINSZ) { 3250 *(w + IWN_TX_RING_COUNT) = *w; 3251 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3252 BUS_DMASYNC_PREWRITE); 3253 } 3254 } 3255 3256 #ifdef notyet 3257 static void 3258 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3259 { 3260 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3261 3262 *w = (*w & htole16(0xf000)) | htole16(1); 3263 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3264 BUS_DMASYNC_PREWRITE); 3265 if (idx < IWN_SCHED_WINSZ) { 3266 *(w + IWN_TX_RING_COUNT) = *w; 3267 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3268 BUS_DMASYNC_PREWRITE); 3269 } 3270 } 3271 #endif 3272 3273 static int 3274 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 3275 { 3276 struct iwn_ops *ops = &sc->ops; 3277 const struct ieee80211_txparam *tp; 3278 struct ieee80211vap *vap = ni->ni_vap; 3279 struct ieee80211com 
*ic = ni->ni_ic; 3280 struct iwn_node *wn = (void *)ni; 3281 struct iwn_tx_ring *ring; 3282 struct iwn_tx_desc *desc; 3283 struct iwn_tx_data *data; 3284 struct iwn_tx_cmd *cmd; 3285 struct iwn_cmd_data *tx; 3286 struct ieee80211_frame *wh; 3287 struct ieee80211_key *k = NULL; 3288 struct mbuf *m1; 3289 uint32_t flags; 3290 uint16_t qos; 3291 u_int hdrlen; 3292 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 3293 uint8_t tid, ridx, txant, type; 3294 int ac, i, totlen, error, pad, nsegs = 0, rate; 3295 3296 IWN_LOCK_ASSERT(sc); 3297 3298 wh = mtod(m, struct ieee80211_frame *); 3299 hdrlen = ieee80211_anyhdrsize(wh); 3300 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3301 3302 /* Select EDCA Access Category and TX ring for this frame. */ 3303 if (IEEE80211_QOS_HAS_SEQ(wh)) { 3304 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 3305 tid = qos & IEEE80211_QOS_TID; 3306 } else { 3307 qos = 0; 3308 tid = 0; 3309 } 3310 ac = M_WME_GETAC(m); 3311 3312 if (IEEE80211_QOS_HAS_SEQ(wh) && 3313 IEEE80211_AMPDU_RUNNING(&ni->ni_tx_ampdu[ac])) { 3314 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac]; 3315 3316 ring = &sc->txq[*(int *)tap->txa_private]; 3317 *(uint16_t *)wh->i_seq = 3318 htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT); 3319 ni->ni_txseqs[tid]++; 3320 } else { 3321 ring = &sc->txq[ac]; 3322 } 3323 desc = &ring->desc[ring->cur]; 3324 data = &ring->data[ring->cur]; 3325 3326 /* Choose a TX rate index. */ 3327 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 3328 if (type == IEEE80211_FC0_TYPE_MGT) 3329 rate = tp->mgmtrate; 3330 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 3331 rate = tp->mcastrate; 3332 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 3333 rate = tp->ucastrate; 3334 else { 3335 /* XXX pass pktlen */ 3336 (void) ieee80211_ratectl_rate(ni, NULL, 0); 3337 rate = ni->ni_txrate; 3338 } 3339 ridx = ic->ic_rt->rateCodeToIndex[rate]; 3340 3341 /* Encrypt the frame if need be. 
*/ 3342 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 3343 /* Retrieve key for TX. */ 3344 k = ieee80211_crypto_encap(ni, m); 3345 if (k == NULL) { 3346 m_freem(m); 3347 return ENOBUFS; 3348 } 3349 /* 802.11 header may have moved. */ 3350 wh = mtod(m, struct ieee80211_frame *); 3351 } 3352 totlen = m->m_pkthdr.len; 3353 3354 if (ieee80211_radiotap_active_vap(vap)) { 3355 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 3356 3357 tap->wt_flags = 0; 3358 tap->wt_rate = rate; 3359 if (k != NULL) 3360 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3361 3362 ieee80211_radiotap_tx(vap, m); 3363 } 3364 3365 /* Prepare TX firmware command. */ 3366 cmd = &ring->cmd[ring->cur]; 3367 cmd->code = IWN_CMD_TX_DATA; 3368 cmd->flags = 0; 3369 cmd->qid = ring->qid; 3370 cmd->idx = ring->cur; 3371 3372 tx = (struct iwn_cmd_data *)cmd->data; 3373 /* NB: No need to clear tx, all fields are reinitialized here. */ 3374 tx->scratch = 0; /* clear "scratch" area */ 3375 3376 flags = 0; 3377 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3378 /* Unicast frame, check if an ACK is expected. */ 3379 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 3380 IEEE80211_QOS_ACKPOLICY_NOACK) 3381 flags |= IWN_TX_NEED_ACK; 3382 } 3383 if ((wh->i_fc[0] & 3384 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 3385 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 3386 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 3387 3388 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 3389 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 3390 3391 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 3392 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3393 /* NB: Group frames are sent using CCK in 802.11b/g. 
*/ 3394 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 3395 flags |= IWN_TX_NEED_RTS; 3396 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3397 ridx >= IWN_RIDX_OFDM6) { 3398 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3399 flags |= IWN_TX_NEED_CTS; 3400 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3401 flags |= IWN_TX_NEED_RTS; 3402 } 3403 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 3404 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3405 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3406 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 3407 flags |= IWN_TX_NEED_PROTECTION; 3408 } else 3409 flags |= IWN_TX_FULL_TXOP; 3410 } 3411 } 3412 3413 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3414 type != IEEE80211_FC0_TYPE_DATA) 3415 tx->id = sc->broadcast_id; 3416 else 3417 tx->id = wn->id; 3418 3419 if (type == IEEE80211_FC0_TYPE_MGT) { 3420 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3421 3422 /* Tell HW to set timestamp in probe responses. */ 3423 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3424 flags |= IWN_TX_INSERT_TSTAMP; 3425 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3426 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3427 tx->timeout = htole16(3); 3428 else 3429 tx->timeout = htole16(2); 3430 } else 3431 tx->timeout = htole16(0); 3432 3433 if (hdrlen & 3) { 3434 /* First segment length must be a multiple of 4. */ 3435 flags |= IWN_TX_NEED_PADDING; 3436 pad = 4 - (hdrlen & 3); 3437 } else 3438 pad = 0; 3439 3440 tx->len = htole16(totlen); 3441 tx->tid = tid; 3442 tx->rts_ntries = 60; 3443 tx->data_ntries = 15; 3444 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3445 tx->rate = wn->ridx[rate]; 3446 if (tx->id == sc->broadcast_id) { 3447 /* Group or management frame. */ 3448 tx->linkq = 0; 3449 /* XXX Alternate between antenna A and B? 
*/ 3450 txant = IWN_LSB(sc->txchainmask); 3451 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 3452 } else { 3453 tx->linkq = ni->ni_rates.rs_nrates - ridx - 1; 3454 flags |= IWN_TX_LINKQ; /* enable MRR */ 3455 } 3456 /* Set physical address of "scratch area". */ 3457 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3458 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3459 3460 /* Copy 802.11 header in TX command. */ 3461 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3462 3463 /* Trim 802.11 header. */ 3464 m_adj(m, hdrlen); 3465 tx->security = 0; 3466 tx->flags = htole32(flags); 3467 3468 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 3469 &nsegs, BUS_DMA_NOWAIT); 3470 if (error != 0) { 3471 if (error != EFBIG) { 3472 device_printf(sc->sc_dev, 3473 "%s: can't map mbuf (error %d)\n", __func__, error); 3474 m_freem(m); 3475 return error; 3476 } 3477 /* Too many DMA segments, linearize mbuf. */ 3478 m1 = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3479 if (m1 == NULL) { 3480 device_printf(sc->sc_dev, 3481 "%s: could not defrag mbuf\n", __func__); 3482 m_freem(m); 3483 return ENOBUFS; 3484 } 3485 m = m1; 3486 3487 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3488 segs, &nsegs, BUS_DMA_NOWAIT); 3489 if (error != 0) { 3490 device_printf(sc->sc_dev, 3491 "%s: can't map mbuf (error %d)\n", __func__, error); 3492 m_freem(m); 3493 return error; 3494 } 3495 } 3496 3497 data->m = m; 3498 data->ni = ni; 3499 3500 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3501 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3502 3503 /* Fill TX descriptor. */ 3504 desc->nsegs = 1; 3505 if (m->m_len != 0) 3506 desc->nsegs += nsegs; 3507 /* First DMA segment is used by the TX command. */ 3508 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3509 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3510 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3511 /* Other DMA segments are for data payload. 
*/ 3512 seg = &segs[0]; 3513 for (i = 1; i <= nsegs; i++) { 3514 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 3515 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 3516 seg->ds_len << 4); 3517 seg++; 3518 } 3519 3520 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3521 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3522 BUS_DMASYNC_PREWRITE); 3523 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3524 BUS_DMASYNC_PREWRITE); 3525 3526 /* Update TX scheduler. */ 3527 if (ring->qid >= sc->firstaggqueue) 3528 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3529 3530 /* Kick TX ring. */ 3531 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3532 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3533 3534 /* Mark TX ring as full if we reach a certain threshold. */ 3535 if (++ring->queued > IWN_TX_RING_HIMARK) 3536 sc->qfullmsk |= 1 << ring->qid; 3537 3538 return 0; 3539 } 3540 3541 static int 3542 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 3543 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 3544 { 3545 struct iwn_ops *ops = &sc->ops; 3546 struct ifnet *ifp = sc->sc_ifp; 3547 struct ieee80211vap *vap = ni->ni_vap; 3548 struct ieee80211com *ic = ifp->if_l2com; 3549 struct iwn_tx_cmd *cmd; 3550 struct iwn_cmd_data *tx; 3551 struct ieee80211_frame *wh; 3552 struct iwn_tx_ring *ring; 3553 struct iwn_tx_desc *desc; 3554 struct iwn_tx_data *data; 3555 struct mbuf *m1; 3556 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 3557 uint32_t flags; 3558 u_int hdrlen; 3559 int ac, totlen, error, pad, nsegs = 0, i, rate; 3560 uint8_t ridx, type, txant; 3561 3562 IWN_LOCK_ASSERT(sc); 3563 3564 wh = mtod(m, struct ieee80211_frame *); 3565 hdrlen = ieee80211_anyhdrsize(wh); 3566 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3567 3568 ac = params->ibp_pri & 3; 3569 3570 ring = &sc->txq[ac]; 3571 desc = &ring->desc[ring->cur]; 3572 data = &ring->data[ring->cur]; 3573 3574 /* Choose a TX 
rate index. */ 3575 rate = params->ibp_rate0; 3576 ridx = ic->ic_rt->rateCodeToIndex[rate]; 3577 if (ridx == (uint8_t)-1) { 3578 /* XXX fall back to mcast/mgmt rate? */ 3579 m_freem(m); 3580 return EINVAL; 3581 } 3582 3583 totlen = m->m_pkthdr.len; 3584 3585 /* Prepare TX firmware command. */ 3586 cmd = &ring->cmd[ring->cur]; 3587 cmd->code = IWN_CMD_TX_DATA; 3588 cmd->flags = 0; 3589 cmd->qid = ring->qid; 3590 cmd->idx = ring->cur; 3591 3592 tx = (struct iwn_cmd_data *)cmd->data; 3593 /* NB: No need to clear tx, all fields are reinitialized here. */ 3594 tx->scratch = 0; /* clear "scratch" area */ 3595 3596 flags = 0; 3597 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 3598 flags |= IWN_TX_NEED_ACK; 3599 if (params->ibp_flags & IEEE80211_BPF_RTS) { 3600 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3601 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3602 flags &= ~IWN_TX_NEED_RTS; 3603 flags |= IWN_TX_NEED_PROTECTION; 3604 } else 3605 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 3606 } 3607 if (params->ibp_flags & IEEE80211_BPF_CTS) { 3608 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3609 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3610 flags &= ~IWN_TX_NEED_CTS; 3611 flags |= IWN_TX_NEED_PROTECTION; 3612 } else 3613 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 3614 } 3615 if (type == IEEE80211_FC0_TYPE_MGT) { 3616 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3617 3618 /* Tell HW to set timestamp in probe responses. */ 3619 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3620 flags |= IWN_TX_INSERT_TSTAMP; 3621 3622 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3623 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3624 tx->timeout = htole16(3); 3625 else 3626 tx->timeout = htole16(2); 3627 } else 3628 tx->timeout = htole16(0); 3629 3630 if (hdrlen & 3) { 3631 /* First segment length must be a multiple of 4. 
*/ 3632 flags |= IWN_TX_NEED_PADDING; 3633 pad = 4 - (hdrlen & 3); 3634 } else 3635 pad = 0; 3636 3637 if (ieee80211_radiotap_active_vap(vap)) { 3638 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 3639 3640 tap->wt_flags = 0; 3641 tap->wt_rate = rate; 3642 3643 ieee80211_radiotap_tx(vap, m); 3644 } 3645 3646 tx->len = htole16(totlen); 3647 tx->tid = 0; 3648 tx->id = sc->broadcast_id; 3649 tx->rts_ntries = params->ibp_try1; 3650 tx->data_ntries = params->ibp_try0; 3651 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3652 tx->rate = htole32(rate2plcp(rate)); 3653 if (ridx < IWN_RIDX_OFDM6 && 3654 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 3655 tx->rate |= htole32(IWN_RFLAG_CCK); 3656 /* Group or management frame. */ 3657 tx->linkq = 0; 3658 txant = IWN_LSB(sc->txchainmask); 3659 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 3660 /* Set physical address of "scratch area". */ 3661 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3662 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3663 3664 /* Copy 802.11 header in TX command. */ 3665 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3666 3667 /* Trim 802.11 header. */ 3668 m_adj(m, hdrlen); 3669 tx->security = 0; 3670 tx->flags = htole32(flags); 3671 3672 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, 3673 &nsegs, BUS_DMA_NOWAIT); 3674 if (error != 0) { 3675 if (error != EFBIG) { 3676 device_printf(sc->sc_dev, 3677 "%s: can't map mbuf (error %d)\n", __func__, error); 3678 m_freem(m); 3679 return error; 3680 } 3681 /* Too many DMA segments, linearize mbuf. 
*/ 3682 m1 = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER); 3683 if (m1 == NULL) { 3684 device_printf(sc->sc_dev, 3685 "%s: could not defrag mbuf\n", __func__); 3686 m_freem(m); 3687 return ENOBUFS; 3688 } 3689 m = m1; 3690 3691 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3692 segs, &nsegs, BUS_DMA_NOWAIT); 3693 if (error != 0) { 3694 device_printf(sc->sc_dev, 3695 "%s: can't map mbuf (error %d)\n", __func__, error); 3696 m_freem(m); 3697 return error; 3698 } 3699 } 3700 3701 data->m = m; 3702 data->ni = ni; 3703 3704 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 3705 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 3706 3707 /* Fill TX descriptor. */ 3708 desc->nsegs = 1; 3709 if (m->m_len != 0) 3710 desc->nsegs += nsegs; 3711 /* First DMA segment is used by the TX command. */ 3712 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3713 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3714 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3715 /* Other DMA segments are for data payload. */ 3716 seg = &segs[0]; 3717 for (i = 1; i <= nsegs; i++) { 3718 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 3719 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 3720 seg->ds_len << 4); 3721 seg++; 3722 } 3723 3724 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 3725 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3726 BUS_DMASYNC_PREWRITE); 3727 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3728 BUS_DMASYNC_PREWRITE); 3729 3730 /* Update TX scheduler. */ 3731 if (ring->qid >= sc->firstaggqueue) 3732 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3733 3734 /* Kick TX ring. */ 3735 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3736 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3737 3738 /* Mark TX ring as full if we reach a certain threshold. 
*/ 3739 if (++ring->queued > IWN_TX_RING_HIMARK) 3740 sc->qfullmsk |= 1 << ring->qid; 3741 3742 return 0; 3743 } 3744 3745 static int 3746 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3747 const struct ieee80211_bpf_params *params) 3748 { 3749 struct ieee80211com *ic = ni->ni_ic; 3750 struct ifnet *ifp = ic->ic_ifp; 3751 struct iwn_softc *sc = ifp->if_softc; 3752 int error = 0; 3753 3754 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3755 ieee80211_free_node(ni); 3756 m_freem(m); 3757 return ENETDOWN; 3758 } 3759 3760 IWN_LOCK(sc); 3761 if (params == NULL) { 3762 /* 3763 * Legacy path; interpret frame contents to decide 3764 * precisely how to send the frame. 3765 */ 3766 error = iwn_tx_data(sc, m, ni); 3767 } else { 3768 /* 3769 * Caller supplied explicit parameters to use in 3770 * sending the frame. 3771 */ 3772 error = iwn_tx_data_raw(sc, m, ni, params); 3773 } 3774 if (error != 0) { 3775 /* NB: m is reclaimed on tx failure */ 3776 ieee80211_free_node(ni); 3777 ifp->if_oerrors++; 3778 } 3779 sc->sc_tx_timer = 5; 3780 3781 IWN_UNLOCK(sc); 3782 return error; 3783 } 3784 3785 static void 3786 iwn_start(struct ifnet *ifp) 3787 { 3788 struct iwn_softc *sc = ifp->if_softc; 3789 3790 IWN_LOCK(sc); 3791 iwn_start_locked(ifp); 3792 IWN_UNLOCK(sc); 3793 } 3794 3795 static void 3796 iwn_start_locked(struct ifnet *ifp) 3797 { 3798 struct iwn_softc *sc = ifp->if_softc; 3799 struct ieee80211_node *ni; 3800 struct mbuf *m; 3801 3802 IWN_LOCK_ASSERT(sc); 3803 3804 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 3805 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) 3806 return; 3807 3808 for (;;) { 3809 if (sc->qfullmsk != 0) { 3810 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3811 break; 3812 } 3813 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 3814 if (m == NULL) 3815 break; 3816 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 3817 if (iwn_tx_data(sc, m, ni) != 0) { 3818 ieee80211_free_node(ni); 3819 ifp->if_oerrors++; 3820 continue; 3821 } 3822 sc->sc_tx_timer = 5; 3823 } 3824 } 3825 
/*
 * Watchdog callout: fires once per second while the interface is running;
 * counts down sc_tx_timer and triggers a full reinit on TX timeout.
 */
static void
iwn_watchdog(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	IWN_LOCK_ASSERT(sc);

	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			/* Recover via the reinit task; do not rearm. */
			ieee80211_runtask(ic, &sc->sc_reinit_task);
			return;
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
}

/*
 * ifnet ioctl handler: brings the interface up/down on SIOCSIFFLAGS and
 * forwards address/media requests to the generic handlers.
 */
static int
iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0, startall = 0, stop = 0;

	switch (cmd) {
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		IWN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				iwn_init_locked(sc);
				/*
				 * RFKILL bit set means the radio is enabled;
				 * only start the vaps in that case, otherwise
				 * stop them (out of the lock, below).
				 */
				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
					startall = 1;
				else
					stop = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				iwn_stop_locked(sc);
		}
		IWN_UNLOCK(sc);
		if (startall)
			ieee80211_start_all(ic);
		else if (vap != NULL && stop)
			ieee80211_stop(vap);
		break;
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Send a command to the firmware.  Commands go through TX ring 4 (the
 * command ring).  If "async" is zero this sleeps (PCATCH, 1s timeout) until
 * the firmware notification for this descriptor arrives; otherwise it
 * returns immediately after queuing the command.
 */
static int
iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
{
	struct iwn_tx_ring *ring = &sc->txq[4];
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	int totlen, error;

	if (async == 0)
		IWN_LOCK_ASSERT(sc);

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];
	totlen = 4 + size;	/* 4-byte command header + payload */

	if (size > sizeof cmd->data) {
		/* Command is too large to fit in a descriptor. */
		/*
		 * NOTE(review): the size check is against MCLBYTES but the
		 * cluster allocated below is MJUMPAGESIZE — the check looks
		 * overly strict; confirm intended limit.
		 */
		if (totlen > MCLBYTES)
			return EINVAL;
		m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (m == NULL)
			return ENOMEM;
		cmd = mtod(m, struct iwn_tx_cmd *);
		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m);
			return error;
		}
		data->m = m;
	} else {
		/* Command fits in the pre-allocated per-slot buffer. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->code = code;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;
	memcpy(cmd->data, buf, size);

	desc->nsegs = 1;
	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);

	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
	    __func__, iwn_intr_str(cmd->code), cmd->code,
	    cmd->flags, cmd->qid, cmd->idx);

	if (size > sizeof cmd->data) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
}

/* Add a station to the 4965AGN firmware node table (compact format). */
static int
iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
	struct iwn4965_node_info hnode;
	caddr_t src, dst;

	/*
	 * We use the node structure for 5000 Series internally (it is
	 * a superset of the one for 4965AGN). We thus copy the common
	 * fields before sending the command.
	 */
	src = (caddr_t)node;
	dst = (caddr_t)&hnode;
	memcpy(dst, src, 48);
	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
	memcpy(dst + 48, src + 72, 20);
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
}

/* Add a station to the 5000-series firmware node table. */
static int
iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
	/* Direct mapping. */
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}

/*
 * Program the firmware link-quality (rate-retry) table for a station:
 * fill the retry ladder starting at the node's highest rate, stepping
 * down one rate per retry slot.
 */
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
#define RV(v)	((v) & IEEE80211_RATE_VAL)
	struct iwn_node *wn = (void *)ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, rate, txrate;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = wn->id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Start at highest available bit-rate. */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
			rate = IEEE80211_RATE_MCS | txrate;
		else
			rate = RV(rs->rs_rates[txrate]);
		linkq.retry[i] = wn->ridx[rate];

		/* MCS > 7 implies a second spatial stream at this depth. */
		if ((le32toh(wn->ridx[rate]) & IWN_RFLAG_MCS) &&
		    RV(le32toh(wn->ridx[rate])) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}
	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
#undef RV
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		linkq.retry[0] = htole32(0xd);	/* presumably OFDM6; verify */
	else
		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
	/* Use same bit-rate for all TX retries. */
	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
		linkq.retry[i] = linkq.retry[0];
	}
	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}

/*
 * Push the current WME/EDCA parameters to the firmware.  Called by
 * net80211 with the com lock held; that lock is dropped around the
 * firmware command to respect lock ordering with the driver lock.
 */
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	struct iwn_edca_params cmd;
	int aci;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = htole32(IWN_EDCA_UPDATE);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac =
		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
		cmd.ac[aci].aifsn = ac->wmep_aifsn;
		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
		cmd.ac[aci].txoplimit =
		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
	}
	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
	IWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);
	return 0;
#undef IWN_EXP2
}

/* Multicast filter update: hardware does not filter, nothing to do. */
static void
iwn_update_mcast(struct ifnet *ifp)
{
	/* Ignore */
}

/*
 * Program the LED blink pattern via a firmware command.
 * NOTE(review): ``led'' is not zeroed before use; if struct iwn_cmd_led
 * has fields beyond which/unit/off/on, stack garbage is sent — confirm
 * against if_iwnreg.h.
 */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct iwn_cmd_led led;

	/* Clear microcode LED ownership. */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
4130 */ 4131 static int 4132 iwn_set_critical_temp(struct iwn_softc *sc) 4133 { 4134 struct iwn_critical_temp crit; 4135 int32_t temp; 4136 4137 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 4138 4139 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 4140 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 4141 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 4142 temp = IWN_CTOK(110); 4143 else 4144 temp = 110; 4145 memset(&crit, 0, sizeof crit); 4146 crit.tempR = htole32(temp); 4147 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp); 4148 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 4149 } 4150 4151 static int 4152 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 4153 { 4154 struct iwn_cmd_timing cmd; 4155 uint64_t val, mod; 4156 4157 memset(&cmd, 0, sizeof cmd); 4158 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 4159 cmd.bintval = htole16(ni->ni_intval); 4160 cmd.lintval = htole16(10); 4161 4162 /* Compute remaining time until next beacon. */ 4163 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 4164 mod = le64toh(cmd.tstamp) % val; 4165 cmd.binitval = htole32((uint32_t)(val - mod)); 4166 4167 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 4168 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 4169 4170 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 4171 } 4172 4173 static void 4174 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 4175 { 4176 struct ifnet *ifp = sc->sc_ifp; 4177 struct ieee80211com *ic = ifp->if_l2com; 4178 4179 /* Adjust TX power if need be (delta >= 3 degC). */ 4180 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 4181 __func__, sc->temp, temp); 4182 if (abs(temp - sc->temp) >= 3) { 4183 /* Record temperature of last calibration. 
*/ 4184 sc->temp = temp; 4185 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 4186 } 4187 } 4188 4189 /* 4190 * Set TX power for current channel (each rate has its own power settings). 4191 * This function takes into account the regulatory information from EEPROM, 4192 * the current temperature and the current voltage. 4193 */ 4194 static int 4195 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 4196 int async) 4197 { 4198 /* Fixed-point arithmetic division using a n-bit fractional part. */ 4199 #define fdivround(a, b, n) \ 4200 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 4201 /* Linear interpolation. */ 4202 #define interpolate(x, x1, y1, x2, y2, n) \ 4203 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 4204 4205 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 4206 struct iwn_ucode_info *uc = &sc->ucode_info; 4207 struct iwn4965_cmd_txpower cmd; 4208 struct iwn4965_eeprom_chan_samples *chans; 4209 const uint8_t *rf_gain, *dsp_gain; 4210 int32_t vdiff, tdiff; 4211 int i, c, grp, maxpwr; 4212 uint8_t chan; 4213 4214 /* Retrieve current channel from last RXON. */ 4215 chan = sc->rxon.chan; 4216 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", 4217 chan); 4218 4219 memset(&cmd, 0, sizeof cmd); 4220 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 4221 cmd.chan = chan; 4222 4223 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 4224 maxpwr = sc->maxpwr5GHz; 4225 rf_gain = iwn4965_rf_gain_5ghz; 4226 dsp_gain = iwn4965_dsp_gain_5ghz; 4227 } else { 4228 maxpwr = sc->maxpwr2GHz; 4229 rf_gain = iwn4965_rf_gain_2ghz; 4230 dsp_gain = iwn4965_dsp_gain_2ghz; 4231 } 4232 4233 /* Compute voltage compensation. 
*/ 4234 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 4235 if (vdiff > 0) 4236 vdiff *= 2; 4237 if (abs(vdiff) > 2) 4238 vdiff = 0; 4239 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4240 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 4241 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 4242 4243 /* Get channel attenuation group. */ 4244 if (chan <= 20) /* 1-20 */ 4245 grp = 4; 4246 else if (chan <= 43) /* 34-43 */ 4247 grp = 0; 4248 else if (chan <= 70) /* 44-70 */ 4249 grp = 1; 4250 else if (chan <= 124) /* 71-124 */ 4251 grp = 2; 4252 else /* 125-200 */ 4253 grp = 3; 4254 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4255 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 4256 4257 /* Get channel sub-band. */ 4258 for (i = 0; i < IWN_NBANDS; i++) 4259 if (sc->bands[i].lo != 0 && 4260 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 4261 break; 4262 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 4263 return EINVAL; 4264 chans = sc->bands[i].chans; 4265 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4266 "%s: chan %d sub-band=%d\n", __func__, chan, i); 4267 4268 for (c = 0; c < 2; c++) { 4269 uint8_t power, gain, temp; 4270 int maxchpwr, pwr, ridx, idx; 4271 4272 power = interpolate(chan, 4273 chans[0].num, chans[0].samples[c][1].power, 4274 chans[1].num, chans[1].samples[c][1].power, 1); 4275 gain = interpolate(chan, 4276 chans[0].num, chans[0].samples[c][1].gain, 4277 chans[1].num, chans[1].samples[c][1].gain, 1); 4278 temp = interpolate(chan, 4279 chans[0].num, chans[0].samples[c][1].temp, 4280 chans[1].num, chans[1].samples[c][1].temp, 1); 4281 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4282 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 4283 __func__, c, power, gain, temp); 4284 4285 /* Compute temperature compensation. 
*/ 4286 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 4287 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4288 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 4289 __func__, tdiff, sc->temp, temp); 4290 4291 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 4292 /* Convert dBm to half-dBm. */ 4293 maxchpwr = sc->maxpwr[chan] * 2; 4294 if ((ridx / 8) & 1) 4295 maxchpwr -= 6; /* MIMO 2T: -3dB */ 4296 4297 pwr = maxpwr; 4298 4299 /* Adjust TX power based on rate. */ 4300 if ((ridx % 8) == 5) 4301 pwr -= 15; /* OFDM48: -7.5dB */ 4302 else if ((ridx % 8) == 6) 4303 pwr -= 17; /* OFDM54: -8.5dB */ 4304 else if ((ridx % 8) == 7) 4305 pwr -= 20; /* OFDM60: -10dB */ 4306 else 4307 pwr -= 10; /* Others: -5dB */ 4308 4309 /* Do not exceed channel max TX power. */ 4310 if (pwr > maxchpwr) 4311 pwr = maxchpwr; 4312 4313 idx = gain - (pwr - power) - tdiff - vdiff; 4314 if ((ridx / 8) & 1) /* MIMO */ 4315 idx += (int32_t)le32toh(uc->atten[grp][c]); 4316 4317 if (cmd.band == 0) 4318 idx += 9; /* 5GHz */ 4319 if (ridx == IWN_RIDX_MAX) 4320 idx += 5; /* CCK */ 4321 4322 /* Make sure idx stays in a valid range. */ 4323 if (idx < 0) 4324 idx = 0; 4325 else if (idx > IWN4965_MAX_PWR_INDEX) 4326 idx = IWN4965_MAX_PWR_INDEX; 4327 4328 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4329 "%s: Tx chain %d, rate idx %d: power=%d\n", 4330 __func__, c, ridx, idx); 4331 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 4332 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 4333 } 4334 } 4335 4336 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4337 "%s: set tx power for chan %d\n", __func__, chan); 4338 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 4339 4340 #undef interpolate 4341 #undef fdivround 4342 } 4343 4344 static int 4345 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 4346 int async) 4347 { 4348 struct iwn5000_cmd_txpower cmd; 4349 4350 /* 4351 * TX power calibration is handled automatically by the firmware 4352 * for 5000 Series. 
 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	/* Antenna selection mask and AGC value are packed in the PHY stats. */
	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc = (le16toh(phy->agc) >> 7) & 0x7f;

	/*
	 * Keep the strongest reading among the antennas flagged in the
	 * mask.  NOTE(review): rssi[] entries 0/2/4 are assumed to map to
	 * antennas A/B/C from the indexing here — confirm against the
	 * 4965 PHY statistics layout.
	 */
	rssi = 0;
	if (mask & IWN_ANT_A)
		rssi = MAX(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)
		rssi = MAX(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)
		rssi = MAX(rssi, phy->rssi[4]);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	/* Subtract AGC and a fixed offset to convert the reading to dBm. */
	return rssi - agc - IWN_RSSI_TO_DBM;
}

static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t agc;
	int rssi;

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	/* Per-chain RSSI lives in the low byte of each 16-bit word. */
	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
	    le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		/* Skip antennas that reported no noise (raw value 0). */
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/*
	 * There should be at least one antenna but check anyway; -127 dBm
	 * is returned as a floor value when none reported.  NOTE(review):
	 * the -107 offset converting the 8-bit firmware reading to dBm is
	 * taken as-is from the original code — verify against firmware docs.
	 */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	/* R1-R3 are calibration points from the ucode; R4 is the raw reading. */
	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
	/*
	 * Compute temperature in Kelvin.  NOTE(review): the 259, 97/100
	 * and +8 factors are vendor calibration constants carried over
	 * unchanged from the reference driver.
	 */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.
	 */
	temp = le32toh(sc->rawtemp);
	/* 5150 reports a scaled value that needs the EEPROM offset applied. */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
4476 */ 4477 static int 4478 iwn_init_sensitivity(struct iwn_softc *sc) 4479 { 4480 struct iwn_ops *ops = &sc->ops; 4481 struct iwn_calib_state *calib = &sc->calib; 4482 uint32_t flags; 4483 int error; 4484 4485 /* Reset calibration state machine. */ 4486 memset(calib, 0, sizeof (*calib)); 4487 calib->state = IWN_CALIB_STATE_INIT; 4488 calib->cck_state = IWN_CCK_STATE_HIFA; 4489 /* Set initial correlation values. */ 4490 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 4491 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 4492 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 4493 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 4494 calib->cck_x4 = 125; 4495 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 4496 calib->energy_cck = sc->limits->energy_cck; 4497 4498 /* Write initial sensitivity. */ 4499 if ((error = iwn_send_sensitivity(sc)) != 0) 4500 return error; 4501 4502 /* Write initial gains. */ 4503 if ((error = ops->init_gains(sc)) != 0) 4504 return error; 4505 4506 /* Request statistics at each beacon interval. */ 4507 flags = 0; 4508 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n", 4509 __func__); 4510 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 4511 } 4512 4513 /* 4514 * Collect noise and RSSI statistics for the first 20 beacons received 4515 * after association and use them to determine connected antennas and 4516 * to set differential gains. 4517 */ 4518 static void 4519 iwn_collect_noise(struct iwn_softc *sc, 4520 const struct iwn_rx_general_stats *stats) 4521 { 4522 struct iwn_ops *ops = &sc->ops; 4523 struct iwn_calib_state *calib = &sc->calib; 4524 uint32_t val; 4525 int i; 4526 4527 /* Accumulate RSSI and noise for all 3 antennas. */ 4528 for (i = 0; i < 3; i++) { 4529 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 4530 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 4531 } 4532 /* NB: We update differential gains only once after 20 beacons. 
 */
	if (++calib->nbeacons < 20)
		return;

	/* Determine highest average RSSI. */
	val = MAX(calib->rssi[0], calib->rssi[1]);
	val = MAX(calib->rssi[2], val);

	/* Determine which antennas are connected. */
	sc->chainmask = sc->rxchainmask;
	for (i = 0; i < 3; i++)
		/*
		 * Sums were accumulated over 20 beacons, so this drops any
		 * chain whose average RSSI is more than 15 units below the
		 * best chain.
		 */
		if (val - calib->rssi[i] > 15 * 20)
			sc->chainmask &= ~(1 << i);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
	    __func__, sc->rxchainmask, sc->chainmask);

	/* If none of the TX antennas are connected, keep at least one. */
	if ((sc->chainmask & sc->txchainmask) == 0)
		sc->chainmask |= IWN_LSB(sc->txchainmask);

	(void)ops->set_gains(sc);
	calib->state = IWN_CALIB_STATE_RUN;

#ifdef notyet
	/* XXX Disable RX chains with no antennas connected. */
	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
#endif

#if 0
	/* XXX: not yet */
	/* Enable power-saving mode if requested by user. */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
		(void)iwn_set_pslevel(sc, 0, 3, 1);
#endif
}

/*
 * Send an initial differential-gain calibration command (4965 flavor).
 */
static int
iwn4965_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib_gain cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Differential gains initially set to 0 for all 3 antennas.
 */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Send an initial noise-gain reset command (>=5000 flavor; the exact
 * calibration code is chip-specific and comes from the softc).
 */
static int
iwn5000_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->reset_noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

static int
iwn4965_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, delta, noise;

	/* Get minimal noise among connected antennas. */
	noise = INT_MAX;	/* NB: There's at least one antenna. */
	for (i = 0; i < 3; i++)
		if (sc->chainmask & (1 << i))
			noise = MIN(calib->noise[i], noise);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Set differential gains for connected antennas. */
	for (i = 0; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* Compute attenuation (in unit of 1.5dB). */
			delta = (noise - (int32_t)calib->noise[i]) / 30;
			/* NB: delta <= 0 */
			/* Limit to [-4.5dB,0]. */
			cmd.gain[i] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

static int
iwn5000_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, ant, div, delta;

	/* We collected 20 beacons and !=6050 need a 1.5 factor.
*/ 4638 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4639 4640 memset(&cmd, 0, sizeof cmd); 4641 cmd.code = sc->noise_gain; 4642 cmd.ngroups = 1; 4643 cmd.isvalid = 1; 4644 /* Get first available RX antenna as referential. */ 4645 ant = IWN_LSB(sc->rxchainmask); 4646 /* Set differential gains for other antennas. */ 4647 for (i = ant + 1; i < 3; i++) { 4648 if (sc->chainmask & (1 << i)) { 4649 /* The delta is relative to antenna "ant". */ 4650 delta = ((int32_t)calib->noise[ant] - 4651 (int32_t)calib->noise[i]) / div; 4652 /* Limit to [-4.5dB,+4.5dB]. */ 4653 cmd.gain[i - 1] = MIN(abs(delta), 3); 4654 if (delta < 0) 4655 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4656 } 4657 } 4658 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4659 "setting differential gains Ant B/C: %x/%x (%x)\n", 4660 cmd.gain[0], cmd.gain[1], sc->chainmask); 4661 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4662 } 4663 4664 /* 4665 * Tune RF RX sensitivity based on the number of false alarms detected 4666 * during the last beacon period. 4667 */ 4668 static void 4669 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4670 { 4671 #define inc(val, inc, max) \ 4672 if ((val) < (max)) { \ 4673 if ((val) < (max) - (inc)) \ 4674 (val) += (inc); \ 4675 else \ 4676 (val) = (max); \ 4677 needs_update = 1; \ 4678 } 4679 #define dec(val, dec, min) \ 4680 if ((val) > (min)) { \ 4681 if ((val) > (min) + (dec)) \ 4682 (val) -= (dec); \ 4683 else \ 4684 (val) = (min); \ 4685 needs_update = 1; \ 4686 } 4687 4688 const struct iwn_sensitivity_limits *limits = sc->limits; 4689 struct iwn_calib_state *calib = &sc->calib; 4690 uint32_t val, rxena, fa; 4691 uint32_t energy[3], energy_min; 4692 uint8_t noise[3], noise_ref; 4693 int i, needs_update = 0; 4694 4695 /* Check that we've been enabled long enough. */ 4696 if ((rxena = le32toh(stats->general.load)) == 0) 4697 return; 4698 4699 /* Compute number of false alarms since last call for OFDM. 
*/ 4700 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4701 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 4702 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 4703 4704 /* Save counters values for next call. */ 4705 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 4706 calib->fa_ofdm = le32toh(stats->ofdm.fa); 4707 4708 if (fa > 50 * rxena) { 4709 /* High false alarm count, decrease sensitivity. */ 4710 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4711 "%s: OFDM high false alarm count: %u\n", __func__, fa); 4712 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4713 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4714 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4715 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4716 4717 } else if (fa < 5 * rxena) { 4718 /* Low false alarm count, increase sensitivity. */ 4719 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4720 "%s: OFDM low false alarm count: %u\n", __func__, fa); 4721 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4722 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4723 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4724 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4725 } 4726 4727 /* Compute maximum noise among 3 receivers. */ 4728 for (i = 0; i < 3; i++) 4729 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 4730 val = MAX(noise[0], noise[1]); 4731 val = MAX(noise[2], val); 4732 /* Insert it into our samples table. */ 4733 calib->noise_samples[calib->cur_noise_sample] = val; 4734 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4735 4736 /* Compute maximum noise among last 20 samples. */ 4737 noise_ref = calib->noise_samples[0]; 4738 for (i = 1; i < 20; i++) 4739 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4740 4741 /* Compute maximum energy among 3 receivers. */ 4742 for (i = 0; i < 3; i++) 4743 energy[i] = le32toh(stats->general.energy[i]); 4744 val = MIN(energy[0], energy[1]); 4745 val = MIN(energy[2], val); 4746 /* Insert it into our samples table. 
*/ 4747 calib->energy_samples[calib->cur_energy_sample] = val; 4748 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4749 4750 /* Compute minimum energy among last 10 samples. */ 4751 energy_min = calib->energy_samples[0]; 4752 for (i = 1; i < 10; i++) 4753 energy_min = MAX(energy_min, calib->energy_samples[i]); 4754 energy_min += 6; 4755 4756 /* Compute number of false alarms since last call for CCK. */ 4757 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4758 fa += le32toh(stats->cck.fa) - calib->fa_cck; 4759 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 4760 4761 /* Save counters values for next call. */ 4762 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 4763 calib->fa_cck = le32toh(stats->cck.fa); 4764 4765 if (fa > 50 * rxena) { 4766 /* High false alarm count, decrease sensitivity. */ 4767 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4768 "%s: CCK high false alarm count: %u\n", __func__, fa); 4769 calib->cck_state = IWN_CCK_STATE_HIFA; 4770 calib->low_fa = 0; 4771 4772 if (calib->cck_x4 > 160) { 4773 calib->noise_ref = noise_ref; 4774 if (calib->energy_cck > 2) 4775 dec(calib->energy_cck, 2, energy_min); 4776 } 4777 if (calib->cck_x4 < 160) { 4778 calib->cck_x4 = 161; 4779 needs_update = 1; 4780 } else 4781 inc(calib->cck_x4, 3, limits->max_cck_x4); 4782 4783 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4784 4785 } else if (fa < 5 * rxena) { 4786 /* Low false alarm count, increase sensitivity. */ 4787 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4788 "%s: CCK low false alarm count: %u\n", __func__, fa); 4789 calib->cck_state = IWN_CCK_STATE_LOFA; 4790 calib->low_fa++; 4791 4792 if (calib->cck_state != IWN_CCK_STATE_INIT && 4793 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4794 calib->low_fa > 100)) { 4795 inc(calib->energy_cck, 2, limits->min_energy_cck); 4796 dec(calib->cck_x4, 3, limits->min_cck_x4); 4797 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4798 } 4799 } else { 4800 /* Not worth to increase or decrease sensitivity. 
*/ 4801 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4802 "%s: CCK normal false alarm count: %u\n", __func__, fa); 4803 calib->low_fa = 0; 4804 calib->noise_ref = noise_ref; 4805 4806 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4807 /* Previous interval had many false alarms. */ 4808 dec(calib->energy_cck, 8, energy_min); 4809 } 4810 calib->cck_state = IWN_CCK_STATE_INIT; 4811 } 4812 4813 if (needs_update) 4814 (void)iwn_send_sensitivity(sc); 4815 #undef dec 4816 #undef inc 4817 } 4818 4819 static int 4820 iwn_send_sensitivity(struct iwn_softc *sc) 4821 { 4822 struct iwn_calib_state *calib = &sc->calib; 4823 struct iwn_enhanced_sensitivity_cmd cmd; 4824 int len; 4825 4826 memset(&cmd, 0, sizeof cmd); 4827 len = sizeof (struct iwn_sensitivity_cmd); 4828 cmd.which = IWN_SENSITIVITY_WORKTBL; 4829 /* OFDM modulation. */ 4830 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4831 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4832 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4833 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4834 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4835 cmd.energy_ofdm_th = htole16(62); 4836 /* CCK modulation. */ 4837 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4838 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4839 cmd.energy_cck = htole16(calib->energy_cck); 4840 /* Barker modulation: use default values. */ 4841 cmd.corr_barker = htole16(190); 4842 cmd.corr_barker_mrc = htole16(390); 4843 4844 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4845 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 4846 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 4847 calib->ofdm_mrc_x4, calib->cck_x4, 4848 calib->cck_mrc_x4, calib->energy_cck); 4849 4850 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 4851 goto send; 4852 /* Enhanced sensitivity settings. 
*/ 4853 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 4854 cmd.ofdm_det_slope_mrc = htole16(668); 4855 cmd.ofdm_det_icept_mrc = htole16(4); 4856 cmd.ofdm_det_slope = htole16(486); 4857 cmd.ofdm_det_icept = htole16(37); 4858 cmd.cck_det_slope_mrc = htole16(853); 4859 cmd.cck_det_icept_mrc = htole16(4); 4860 cmd.cck_det_slope = htole16(476); 4861 cmd.cck_det_icept = htole16(99); 4862 send: 4863 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 4864 } 4865 4866 /* 4867 * Set STA mode power saving level (between 0 and 5). 4868 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4869 */ 4870 static int 4871 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4872 { 4873 struct iwn_pmgt_cmd cmd; 4874 const struct iwn_pmgt *pmgt; 4875 uint32_t max, skip_dtim; 4876 uint32_t reg; 4877 int i; 4878 4879 /* Select which PS parameters to use. */ 4880 if (dtim <= 2) 4881 pmgt = &iwn_pmgt[0][level]; 4882 else if (dtim <= 10) 4883 pmgt = &iwn_pmgt[1][level]; 4884 else 4885 pmgt = &iwn_pmgt[2][level]; 4886 4887 memset(&cmd, 0, sizeof cmd); 4888 if (level != 0) /* not CAM */ 4889 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4890 if (level == 5) 4891 cmd.flags |= htole16(IWN_PS_FAST_PD); 4892 /* Retrieve PCIe Active State Power Management (ASPM). */ 4893 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 4894 if (!(reg & 0x1)) /* L0s Entry disabled. 
 */
		/* L0s entry is disabled, so handle PCI power management. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);

	/*
	 * No DTIM period known yet: assume a period of 1 beacon and do
	 * not sleep over DTIM.
	 */
	if (dtim == 0) {
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;
	if (skip_dtim != 0) {
		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			/* Round the longest interval down to a DTIM multiple. */
			max = (max / dtim) * dtim;
	} else
		max = dtim;
	/* Cap every sleep interval at the computed maximum. */
	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
	    level);
	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/*
 * Send the basic (legacy) bluetooth coexistence configuration using the
 * driver default lead time and kill limits.
 */
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
	struct iwn_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}

static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
	/*
	 * 3-wire coexistence signalling lookup table.  NOTE(review):
	 * magic values carried over from vendor reference code; their
	 * meaning is not documented here.
	 */
	static const uint32_t btcoex_3wire[12] = {
		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
	};
	struct iwn6000_btcoex_config btconfig;
	struct iwn_btcoex_priotable btprio;
	struct iwn_btcoex_prot btprot;
	int error, i;

	memset(&btconfig, 0, sizeof btconfig);
	btconfig.flags = 145;
	btconfig.max_kill = 5;
	btconfig.bt3_t7_timer = 1;
	btconfig.kill_ack = htole32(0xffff0000);
	btconfig.kill_cts = htole32(0xffff0000);
	btconfig.sample_time = 2;
	btconfig.bt3_t2_timer = 0xc;
	for (i = 0; i < 12; i++)
btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 4958 btconfig.valid = htole16(0xff); 4959 btconfig.prio_boost = 0xf0; 4960 DPRINTF(sc, IWN_DEBUG_RESET, 4961 "%s: configuring advanced bluetooth coexistence\n", __func__); 4962 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1); 4963 if (error != 0) 4964 return error; 4965 4966 memset(&btprio, 0, sizeof btprio); 4967 btprio.calib_init1 = 0x6; 4968 btprio.calib_init2 = 0x7; 4969 btprio.calib_periodic_low1 = 0x2; 4970 btprio.calib_periodic_low2 = 0x3; 4971 btprio.calib_periodic_high1 = 0x4; 4972 btprio.calib_periodic_high2 = 0x5; 4973 btprio.dtim = 0x6; 4974 btprio.scan52 = 0x8; 4975 btprio.scan24 = 0xa; 4976 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 4977 1); 4978 if (error != 0) 4979 return error; 4980 4981 /* Force BT state machine change. */ 4982 memset(&btprot, 0, sizeof btprio); 4983 btprot.open = 1; 4984 btprot.type = 1; 4985 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 4986 if (error != 0) 4987 return error; 4988 btprot.open = 0; 4989 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 4990 } 4991 4992 static int 4993 iwn5000_runtime_calib(struct iwn_softc *sc) 4994 { 4995 struct iwn5000_calib_config cmd; 4996 4997 memset(&cmd, 0, sizeof cmd); 4998 cmd.ucode.once.enable = 0xffffffff; 4999 cmd.ucode.once.start = IWN5000_CALIB_DC; 5000 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5001 "%s: configuring runtime calibration\n", __func__); 5002 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 5003 } 5004 5005 static int 5006 iwn_config(struct iwn_softc *sc) 5007 { 5008 struct iwn_ops *ops = &sc->ops; 5009 struct ifnet *ifp = sc->sc_ifp; 5010 struct ieee80211com *ic = ifp->if_l2com; 5011 uint32_t txmask; 5012 uint16_t rxchain; 5013 int error; 5014 5015 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 5016 /* Set radio temperature sensor offset. 
 */
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return error;
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
		/* NB: payload is little-endian, hence the htole32. */
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
		error = iwn_send_advanced_btcoex(sc);
	else
		error = iwn_send_btcoex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX.
*/ 5064 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 5065 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp)); 5066 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp)); 5067 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 5068 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 5069 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 5070 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 5071 switch (ic->ic_opmode) { 5072 case IEEE80211_M_STA: 5073 sc->rxon.mode = IWN_MODE_STA; 5074 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 5075 break; 5076 case IEEE80211_M_MONITOR: 5077 sc->rxon.mode = IWN_MODE_MONITOR; 5078 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 5079 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 5080 break; 5081 default: 5082 /* Should not get there. */ 5083 break; 5084 } 5085 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 5086 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 5087 sc->rxon.ht_single_mask = 0xff; 5088 sc->rxon.ht_dual_mask = 0xff; 5089 sc->rxon.ht_triple_mask = 0xff; 5090 rxchain = 5091 IWN_RXCHAIN_VALID(sc->rxchainmask) | 5092 IWN_RXCHAIN_MIMO_COUNT(2) | 5093 IWN_RXCHAIN_IDLE_COUNT(2); 5094 sc->rxon.rxchain = htole16(rxchain); 5095 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); 5096 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 5097 if (error != 0) { 5098 device_printf(sc->sc_dev, "%s: RXON command failed\n", 5099 __func__); 5100 return error; 5101 } 5102 5103 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 5104 device_printf(sc->sc_dev, "%s: could not add broadcast node\n", 5105 __func__); 5106 return error; 5107 } 5108 5109 /* Configuration has changed, set TX power accordingly. 
*/ 5110 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) { 5111 device_printf(sc->sc_dev, "%s: could not set TX power\n", 5112 __func__); 5113 return error; 5114 } 5115 5116 if ((error = iwn_set_critical_temp(sc)) != 0) { 5117 device_printf(sc->sc_dev, 5118 "%s: could not set critical temperature\n", __func__); 5119 return error; 5120 } 5121 5122 /* Set power saving level to CAM during initialization. */ 5123 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 5124 device_printf(sc->sc_dev, 5125 "%s: could not set power saving level\n", __func__); 5126 return error; 5127 } 5128 return 0; 5129 } 5130 5131 /* 5132 * Add an ssid element to a frame. 5133 */ 5134 static uint8_t * 5135 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) 5136 { 5137 *frm++ = IEEE80211_ELEMID_SSID; 5138 *frm++ = len; 5139 memcpy(frm, ssid, len); 5140 return frm + len; 5141 } 5142 5143 static int 5144 iwn_scan(struct iwn_softc *sc) 5145 { 5146 struct ifnet *ifp = sc->sc_ifp; 5147 struct ieee80211com *ic = ifp->if_l2com; 5148 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/ 5149 struct ieee80211_node *ni = ss->ss_vap->iv_bss; 5150 struct iwn_scan_hdr *hdr; 5151 struct iwn_cmd_data *tx; 5152 struct iwn_scan_essid *essid; 5153 struct iwn_scan_chan *chan; 5154 struct ieee80211_frame *wh; 5155 struct ieee80211_rateset *rs; 5156 struct ieee80211_channel *c; 5157 uint8_t *buf, *frm; 5158 uint16_t rxchain; 5159 uint8_t txant; 5160 int buflen, error; 5161 5162 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 5163 if (buf == NULL) { 5164 device_printf(sc->sc_dev, 5165 "%s: could not allocate buffer for scan command\n", 5166 __func__); 5167 return ENOMEM; 5168 } 5169 hdr = (struct iwn_scan_hdr *)buf; 5170 /* 5171 * Move to the next channel if no frames are received within 10ms 5172 * after sending the probe request. 
5173 */ 5174 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 5175 hdr->quiet_threshold = htole16(1); /* min # of packets */ 5176 5177 /* Select antennas for scanning. */ 5178 rxchain = 5179 IWN_RXCHAIN_VALID(sc->rxchainmask) | 5180 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 5181 IWN_RXCHAIN_DRIVER_FORCE; 5182 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) && 5183 sc->hw_type == IWN_HW_REV_TYPE_4965) { 5184 /* Ant A must be avoided in 5GHz because of an HW bug. */ 5185 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); 5186 } else /* Use all available RX antennas. */ 5187 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 5188 hdr->rxchain = htole16(rxchain); 5189 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 5190 5191 tx = (struct iwn_cmd_data *)(hdr + 1); 5192 tx->flags = htole32(IWN_TX_AUTO_SEQ); 5193 tx->id = sc->broadcast_id; 5194 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 5195 5196 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) { 5197 /* Send probe requests at 6Mbps. */ 5198 tx->rate = htole32(0xd); 5199 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 5200 } else { 5201 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 5202 if (sc->hw_type == IWN_HW_REV_TYPE_4965 && 5203 sc->rxon.associd && sc->rxon.chan > 14) 5204 tx->rate = htole32(0xd); 5205 else { 5206 /* Send probe requests at 1Mbps. */ 5207 tx->rate = htole32(10 | IWN_RFLAG_CCK); 5208 } 5209 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 5210 } 5211 /* Use the first valid TX antenna. */ 5212 txant = IWN_LSB(sc->txchainmask); 5213 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 5214 5215 essid = (struct iwn_scan_essid *)(tx + 1); 5216 if (ss->ss_ssid[0].len != 0) { 5217 essid[0].id = IEEE80211_ELEMID_SSID; 5218 essid[0].len = ss->ss_ssid[0].len; 5219 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 5220 } 5221 /* 5222 * Build a probe request frame. Most of the following code is a 5223 * copy & paste of what is done in net80211. 
5224 */ 5225 wh = (struct ieee80211_frame *)(essid + 20); 5226 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 5227 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 5228 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 5229 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 5230 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 5231 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 5232 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 5233 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 5234 5235 frm = (uint8_t *)(wh + 1); 5236 frm = ieee80211_add_ssid(frm, NULL, 0); 5237 frm = ieee80211_add_rates(frm, rs); 5238 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 5239 frm = ieee80211_add_xrates(frm, rs); 5240 if (ic->ic_htcaps & IEEE80211_HTC_HT) 5241 frm = ieee80211_add_htcap(frm, ni); 5242 5243 /* Set length of probe request. */ 5244 tx->len = htole16(frm - (uint8_t *)wh); 5245 5246 c = ic->ic_curchan; 5247 chan = (struct iwn_scan_chan *)frm; 5248 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 5249 chan->flags = 0; 5250 if (ss->ss_nssid > 0) 5251 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 5252 chan->dsp_gain = 0x6e; 5253 if (IEEE80211_IS_CHAN_5GHZ(c) && 5254 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 5255 chan->rf_gain = 0x3b; 5256 chan->active = htole16(24); 5257 chan->passive = htole16(110); 5258 chan->flags |= htole32(IWN_CHAN_ACTIVE); 5259 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 5260 chan->rf_gain = 0x3b; 5261 chan->active = htole16(24); 5262 if (sc->rxon.associd) 5263 chan->passive = htole16(78); 5264 else 5265 chan->passive = htole16(110); 5266 hdr->crc_threshold = 0xffff; 5267 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 5268 chan->rf_gain = 0x28; 5269 chan->active = htole16(36); 5270 chan->passive = htole16(120); 5271 chan->flags |= htole32(IWN_CHAN_ACTIVE); 5272 } else { 5273 chan->rf_gain = 0x28; 5274 chan->active = htole16(36); 5275 if (sc->rxon.associd) 5276 chan->passive = htole16(88); 5277 else 5278 chan->passive = htole16(120); 5279 
hdr->crc_threshold = 0xffff; 5280 } 5281 5282 DPRINTF(sc, IWN_DEBUG_STATE, 5283 "%s: chan %u flags 0x%x rf_gain 0x%x " 5284 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__, 5285 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 5286 chan->active, chan->passive); 5287 5288 hdr->nchan++; 5289 chan++; 5290 buflen = (uint8_t *)chan - buf; 5291 hdr->len = htole16(buflen); 5292 5293 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 5294 hdr->nchan); 5295 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 5296 free(buf, M_DEVBUF); 5297 return error; 5298 } 5299 5300 static int 5301 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 5302 { 5303 struct iwn_ops *ops = &sc->ops; 5304 struct ifnet *ifp = sc->sc_ifp; 5305 struct ieee80211com *ic = ifp->if_l2com; 5306 struct ieee80211_node *ni = vap->iv_bss; 5307 int error; 5308 5309 /* Update adapter configuration. */ 5310 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 5311 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 5312 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 5313 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 5314 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 5315 if (ic->ic_flags & IEEE80211_F_SHSLOT) 5316 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 5317 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 5318 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 5319 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 5320 sc->rxon.cck_mask = 0; 5321 sc->rxon.ofdm_mask = 0x15; 5322 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 5323 sc->rxon.cck_mask = 0x03; 5324 sc->rxon.ofdm_mask = 0; 5325 } else { 5326 /* Assume 802.11b/g. 
*/ 5327 sc->rxon.cck_mask = 0x0f; 5328 sc->rxon.ofdm_mask = 0x15; 5329 } 5330 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", 5331 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask, 5332 sc->rxon.ofdm_mask); 5333 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 5334 if (error != 0) { 5335 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n", 5336 __func__, error); 5337 return error; 5338 } 5339 5340 /* Configuration has changed, set TX power accordingly. */ 5341 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 5342 device_printf(sc->sc_dev, 5343 "%s: could not set TX power, error %d\n", __func__, error); 5344 return error; 5345 } 5346 /* 5347 * Reconfiguring RXON clears the firmware nodes table so we must 5348 * add the broadcast node again. 5349 */ 5350 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) { 5351 device_printf(sc->sc_dev, 5352 "%s: could not add broadcast node, error %d\n", __func__, 5353 error); 5354 return error; 5355 } 5356 return 0; 5357 } 5358 5359 static int 5360 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) 5361 { 5362 struct iwn_ops *ops = &sc->ops; 5363 struct ifnet *ifp = sc->sc_ifp; 5364 struct ieee80211com *ic = ifp->if_l2com; 5365 struct ieee80211_node *ni = vap->iv_bss; 5366 struct iwn_node_info node; 5367 uint32_t htflags = 0; 5368 int error; 5369 5370 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5371 /* Link LED blinks while monitoring. */ 5372 iwn_set_led(sc, IWN_LED_LINK, 5, 5); 5373 return 0; 5374 } 5375 if ((error = iwn_set_timing(sc, ni)) != 0) { 5376 device_printf(sc->sc_dev, 5377 "%s: could not set timing, error %d\n", __func__, error); 5378 return error; 5379 } 5380 5381 /* Update adapter configuration. 
*/ 5382 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 5383 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 5384 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 5385 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 5386 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 5387 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 5388 if (ic->ic_flags & IEEE80211_F_SHSLOT) 5389 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 5390 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 5391 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 5392 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { 5393 sc->rxon.cck_mask = 0; 5394 sc->rxon.ofdm_mask = 0x15; 5395 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { 5396 sc->rxon.cck_mask = 0x03; 5397 sc->rxon.ofdm_mask = 0; 5398 } else { 5399 /* Assume 802.11b/g. */ 5400 sc->rxon.cck_mask = 0x0f; 5401 sc->rxon.ofdm_mask = 0x15; 5402 } 5403 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 5404 htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode); 5405 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 5406 switch (ic->ic_curhtprotmode) { 5407 case IEEE80211_HTINFO_OPMODE_HT20PR: 5408 htflags |= IWN_RXON_HT_MODEPURE40; 5409 break; 5410 default: 5411 htflags |= IWN_RXON_HT_MODEMIXED; 5412 break; 5413 } 5414 } 5415 if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) 5416 htflags |= IWN_RXON_HT_HT40MINUS; 5417 } 5418 sc->rxon.flags |= htole32(htflags); 5419 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 5420 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n", 5421 sc->rxon.chan, sc->rxon.flags); 5422 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 5423 if (error != 0) { 5424 device_printf(sc->sc_dev, 5425 "%s: could not update configuration, error %d\n", __func__, 5426 error); 5427 return error; 5428 } 5429 5430 /* Configuration has changed, set TX power accordingly. 
*/ 5431 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) { 5432 device_printf(sc->sc_dev, 5433 "%s: could not set TX power, error %d\n", __func__, error); 5434 return error; 5435 } 5436 5437 /* Fake a join to initialize the TX rate. */ 5438 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 5439 iwn_newassoc(ni, 1); 5440 5441 /* Add BSS node. */ 5442 memset(&node, 0, sizeof node); 5443 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 5444 node.id = IWN_ID_BSS; 5445 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 5446 switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) { 5447 case IEEE80211_HTCAP_SMPS_ENA: 5448 node.htflags |= htole32(IWN_SMPS_MIMO_DIS); 5449 break; 5450 case IEEE80211_HTCAP_SMPS_DYNAMIC: 5451 node.htflags |= htole32(IWN_SMPS_MIMO_PROT); 5452 break; 5453 } 5454 node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) | 5455 IWN_AMDPU_DENSITY(5)); /* 4us */ 5456 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) 5457 node.htflags |= htole32(IWN_NODE_HT40); 5458 } 5459 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__); 5460 error = ops->add_node(sc, &node, 1); 5461 if (error != 0) { 5462 device_printf(sc->sc_dev, 5463 "%s: could not add BSS node, error %d\n", __func__, error); 5464 return error; 5465 } 5466 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n", 5467 __func__, node.id); 5468 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 5469 device_printf(sc->sc_dev, 5470 "%s: could not setup link quality for node %d, error %d\n", 5471 __func__, node.id, error); 5472 return error; 5473 } 5474 5475 if ((error = iwn_init_sensitivity(sc)) != 0) { 5476 device_printf(sc->sc_dev, 5477 "%s: could not set sensitivity, error %d\n", __func__, 5478 error); 5479 return error; 5480 } 5481 /* Start periodic calibration timer. */ 5482 sc->calib.state = IWN_CALIB_STATE_ASSOC; 5483 sc->calib_cnt = 0; 5484 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 5485 sc); 5486 5487 /* Link LED always on while associated. 
*/ 5488 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 5489 return 0; 5490 } 5491 5492 /* 5493 * This function is called by upper layer when an ADDBA request is received 5494 * from another STA and before the ADDBA response is sent. 5495 */ 5496 static int 5497 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 5498 int baparamset, int batimeout, int baseqctl) 5499 { 5500 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 5501 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5502 struct iwn_ops *ops = &sc->ops; 5503 struct iwn_node *wn = (void *)ni; 5504 struct iwn_node_info node; 5505 uint16_t ssn; 5506 uint8_t tid; 5507 int error; 5508 5509 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 5510 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 5511 5512 memset(&node, 0, sizeof node); 5513 node.id = wn->id; 5514 node.control = IWN_NODE_UPDATE; 5515 node.flags = IWN_FLAG_SET_ADDBA; 5516 node.addba_tid = tid; 5517 node.addba_ssn = htole16(ssn); 5518 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 5519 wn->id, tid, ssn); 5520 error = ops->add_node(sc, &node, 1); 5521 if (error != 0) 5522 return error; 5523 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 5524 #undef MS 5525 } 5526 5527 /* 5528 * This function is called by upper layer on teardown of an HT-immediate 5529 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
5530 */ 5531 static void 5532 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 5533 { 5534 struct ieee80211com *ic = ni->ni_ic; 5535 struct iwn_softc *sc = ic->ic_ifp->if_softc; 5536 struct iwn_ops *ops = &sc->ops; 5537 struct iwn_node *wn = (void *)ni; 5538 struct iwn_node_info node; 5539 uint8_t tid; 5540 5541 /* XXX: tid as an argument */ 5542 for (tid = 0; tid < WME_NUM_TID; tid++) { 5543 if (&ni->ni_rx_ampdu[tid] == rap) 5544 break; 5545 } 5546 5547 memset(&node, 0, sizeof node); 5548 node.id = wn->id; 5549 node.control = IWN_NODE_UPDATE; 5550 node.flags = IWN_FLAG_SET_DELBA; 5551 node.delba_tid = tid; 5552 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 5553 (void)ops->add_node(sc, &node, 1); 5554 sc->sc_ampdu_rx_stop(ni, rap); 5555 } 5556 5557 static int 5558 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5559 int dialogtoken, int baparamset, int batimeout) 5560 { 5561 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5562 int qid; 5563 5564 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 5565 if (sc->qid2tap[qid] == NULL) 5566 break; 5567 } 5568 if (qid == sc->ntxqs) { 5569 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 5570 __func__); 5571 return 0; 5572 } 5573 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 5574 if (tap->txa_private == NULL) { 5575 device_printf(sc->sc_dev, 5576 "%s: failed to alloc TX aggregation structure\n", __func__); 5577 return 0; 5578 } 5579 sc->qid2tap[qid] = tap; 5580 *(int *)tap->txa_private = qid; 5581 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5582 batimeout); 5583 } 5584 5585 static int 5586 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5587 int code, int baparamset, int batimeout) 5588 { 5589 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5590 int qid = *(int *)tap->txa_private; 5591 uint8_t tid = WME_AC_TO_TID(tap->txa_ac); 5592 int ret; 5593 5594 if (code 
== IEEE80211_STATUS_SUCCESS) { 5595 ni->ni_txseqs[tid] = tap->txa_start & 0xfff; 5596 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); 5597 if (ret != 1) 5598 return ret; 5599 } else { 5600 sc->qid2tap[qid] = NULL; 5601 free(tap->txa_private, M_DEVBUF); 5602 tap->txa_private = NULL; 5603 } 5604 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); 5605 } 5606 5607 /* 5608 * This function is called by upper layer when an ADDBA response is received 5609 * from another STA. 5610 */ 5611 static int 5612 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5613 uint8_t tid) 5614 { 5615 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[TID_TO_WME_AC(tid)]; 5616 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5617 struct iwn_ops *ops = &sc->ops; 5618 struct iwn_node *wn = (void *)ni; 5619 struct iwn_node_info node; 5620 int error, qid; 5621 5622 /* Enable TX for the specified RA/TID. */ 5623 wn->disable_tid &= ~(1 << tid); 5624 memset(&node, 0, sizeof node); 5625 node.id = wn->id; 5626 node.control = IWN_NODE_UPDATE; 5627 node.flags = IWN_FLAG_SET_DISABLE_TID; 5628 node.disable_tid = htole16(wn->disable_tid); 5629 error = ops->add_node(sc, &node, 1); 5630 if (error != 0) 5631 return 0; 5632 5633 if ((error = iwn_nic_lock(sc)) != 0) 5634 return 0; 5635 qid = *(int *)tap->txa_private; 5636 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff); 5637 iwn_nic_unlock(sc); 5638 5639 iwn_set_link_quality(sc, ni); 5640 return 1; 5641 } 5642 5643 static void 5644 iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 5645 { 5646 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5647 struct iwn_ops *ops = &sc->ops; 5648 uint8_t tid = WME_AC_TO_TID(tap->txa_ac); 5649 int qid; 5650 5651 if (tap->txa_private == NULL) 5652 return; 5653 5654 qid = *(int *)tap->txa_private; 5655 if (iwn_nic_lock(sc) != 0) 5656 return; 5657 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff); 5658 iwn_nic_unlock(sc); 5659 
sc->qid2tap[qid] = NULL; 5660 free(tap->txa_private, M_DEVBUF); 5661 tap->txa_private = NULL; 5662 sc->sc_addba_stop(ni, tap); 5663 } 5664 5665 static void 5666 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5667 int qid, uint8_t tid, uint16_t ssn) 5668 { 5669 struct iwn_node *wn = (void *)ni; 5670 5671 /* Stop TX scheduler while we're changing its configuration. */ 5672 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5673 IWN4965_TXQ_STATUS_CHGACT); 5674 5675 /* Assign RA/TID translation to the queue. */ 5676 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5677 wn->id << 4 | tid); 5678 5679 /* Enable chain-building mode for the queue. */ 5680 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5681 5682 /* Set starting sequence number from the ADDBA request. */ 5683 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 5684 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5685 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5686 5687 /* Set scheduler window size. */ 5688 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 5689 IWN_SCHED_WINSZ); 5690 /* Set scheduler frame limit. */ 5691 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5692 IWN_SCHED_LIMIT << 16); 5693 5694 /* Enable interrupts for the queue. */ 5695 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5696 5697 /* Mark the queue as active. */ 5698 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5699 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 5700 iwn_tid2fifo[tid] << 1); 5701 } 5702 5703 static void 5704 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 5705 { 5706 /* Stop TX scheduler while we're changing its configuration. */ 5707 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5708 IWN4965_TXQ_STATUS_CHGACT); 5709 5710 /* Set starting sequence number from the ADDBA request. 
*/ 5711 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5712 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 5713 5714 /* Disable interrupts for the queue. */ 5715 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 5716 5717 /* Mark the queue as inactive. */ 5718 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5719 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 5720 } 5721 5722 static void 5723 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5724 int qid, uint8_t tid, uint16_t ssn) 5725 { 5726 struct iwn_node *wn = (void *)ni; 5727 5728 /* Stop TX scheduler while we're changing its configuration. */ 5729 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5730 IWN5000_TXQ_STATUS_CHGACT); 5731 5732 /* Assign RA/TID translation to the queue. */ 5733 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 5734 wn->id << 4 | tid); 5735 5736 /* Enable chain-building mode for the queue. */ 5737 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 5738 5739 /* Enable aggregation for the queue. */ 5740 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5741 5742 /* Set starting sequence number from the ADDBA request. */ 5743 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 5744 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5745 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5746 5747 /* Set scheduler window size and frame limit. */ 5748 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5749 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5750 5751 /* Enable interrupts for the queue. */ 5752 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5753 5754 /* Mark the queue as active. 
*/ 5755 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5756 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 5757 } 5758 5759 static void 5760 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 5761 { 5762 /* Stop TX scheduler while we're changing its configuration. */ 5763 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5764 IWN5000_TXQ_STATUS_CHGACT); 5765 5766 /* Disable aggregation for the queue. */ 5767 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 5768 5769 /* Set starting sequence number from the ADDBA request. */ 5770 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 5771 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 5772 5773 /* Disable interrupts for the queue. */ 5774 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 5775 5776 /* Mark the queue as inactive. */ 5777 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5778 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 5779 } 5780 5781 /* 5782 * Query calibration tables from the initialization firmware. We do this 5783 * only once at first boot. Called from a process context. 5784 */ 5785 static int 5786 iwn5000_query_calibration(struct iwn_softc *sc) 5787 { 5788 struct iwn5000_calib_config cmd; 5789 int error; 5790 5791 memset(&cmd, 0, sizeof cmd); 5792 cmd.ucode.once.enable = 0xffffffff; 5793 cmd.ucode.once.start = 0xffffffff; 5794 cmd.ucode.once.send = 0xffffffff; 5795 cmd.ucode.flags = 0xffffffff; 5796 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", 5797 __func__); 5798 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 5799 if (error != 0) 5800 return error; 5801 5802 /* Wait at most two seconds for calibration to complete. */ 5803 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 5804 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz); 5805 return error; 5806 } 5807 5808 /* 5809 * Send calibration results to the runtime firmware. 
These results were 5810 * obtained on first boot from the initialization firmware. 5811 */ 5812 static int 5813 iwn5000_send_calibration(struct iwn_softc *sc) 5814 { 5815 int idx, error; 5816 5817 for (idx = 0; idx < 5; idx++) { 5818 if (sc->calibcmd[idx].buf == NULL) 5819 continue; /* No results available. */ 5820 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5821 "send calibration result idx=%d len=%d\n", idx, 5822 sc->calibcmd[idx].len); 5823 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 5824 sc->calibcmd[idx].len, 0); 5825 if (error != 0) { 5826 device_printf(sc->sc_dev, 5827 "%s: could not send calibration result, error %d\n", 5828 __func__, error); 5829 return error; 5830 } 5831 } 5832 return 0; 5833 } 5834 5835 static int 5836 iwn5000_send_wimax_coex(struct iwn_softc *sc) 5837 { 5838 struct iwn5000_wimax_coex wimax; 5839 5840 #ifdef notyet 5841 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 5842 /* Enable WiMAX coexistence for combo adapters. */ 5843 wimax.flags = 5844 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 5845 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 5846 IWN_WIMAX_COEX_STA_TABLE_VALID | 5847 IWN_WIMAX_COEX_ENABLE; 5848 memcpy(wimax.events, iwn6050_wimax_events, 5849 sizeof iwn6050_wimax_events); 5850 } else 5851 #endif 5852 { 5853 /* Disable WiMAX coexistence. 
*/ 5854 wimax.flags = 0; 5855 memset(wimax.events, 0, sizeof wimax.events); 5856 } 5857 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 5858 __func__); 5859 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 5860 } 5861 5862 static int 5863 iwn5000_crystal_calib(struct iwn_softc *sc) 5864 { 5865 struct iwn5000_phy_calib_crystal cmd; 5866 5867 memset(&cmd, 0, sizeof cmd); 5868 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 5869 cmd.ngroups = 1; 5870 cmd.isvalid = 1; 5871 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 5872 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 5873 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", 5874 cmd.cap_pin[0], cmd.cap_pin[1]); 5875 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5876 } 5877 5878 static int 5879 iwn5000_temp_offset_calib(struct iwn_softc *sc) 5880 { 5881 struct iwn5000_phy_calib_temp_offset cmd; 5882 5883 memset(&cmd, 0, sizeof cmd); 5884 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 5885 cmd.ngroups = 1; 5886 cmd.isvalid = 1; 5887 if (sc->eeprom_temp != 0) 5888 cmd.offset = htole16(sc->eeprom_temp); 5889 else 5890 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 5891 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n", 5892 le16toh(cmd.offset)); 5893 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 5894 } 5895 5896 /* 5897 * This function is called after the runtime firmware notifies us of its 5898 * readiness (called in a process context). 5899 */ 5900 static int 5901 iwn4965_post_alive(struct iwn_softc *sc) 5902 { 5903 int error, qid; 5904 5905 if ((error = iwn_nic_lock(sc)) != 0) 5906 return error; 5907 5908 /* Clear TX scheduler state in SRAM. */ 5909 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5910 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 5911 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 5912 5913 /* Set physical address of TX scheduler rings (1KB aligned). 
*/ 5914 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5915 5916 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5917 5918 /* Disable chain mode for all our 16 queues. */ 5919 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 5920 5921 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 5922 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 5923 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5924 5925 /* Set scheduler window size. */ 5926 iwn_mem_write(sc, sc->sched_base + 5927 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 5928 /* Set scheduler frame limit. */ 5929 iwn_mem_write(sc, sc->sched_base + 5930 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 5931 IWN_SCHED_LIMIT << 16); 5932 } 5933 5934 /* Enable interrupts for all our 16 queues. */ 5935 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 5936 /* Identify TX FIFO rings (0-7). */ 5937 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 5938 5939 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5940 for (qid = 0; qid < 7; qid++) { 5941 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 5942 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5943 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 5944 } 5945 iwn_nic_unlock(sc); 5946 return 0; 5947 } 5948 5949 /* 5950 * This function is called after the initialization or runtime firmware 5951 * notifies us of its readiness (called in a process context). 5952 */ 5953 static int 5954 iwn5000_post_alive(struct iwn_softc *sc) 5955 { 5956 int error, qid; 5957 5958 /* Switch to using ICT interrupt mode. */ 5959 iwn5000_ict_reset(sc); 5960 5961 if ((error = iwn_nic_lock(sc)) != 0) 5962 return error; 5963 5964 /* Clear TX scheduler state in SRAM. */ 5965 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 5966 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 5967 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 5968 5969 /* Set physical address of TX scheduler rings (1KB aligned). 
*/ 5970 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 5971 5972 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 5973 5974 /* Enable chain mode for all queues, except command queue. */ 5975 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 5976 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 5977 5978 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 5979 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 5980 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 5981 5982 iwn_mem_write(sc, sc->sched_base + 5983 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 5984 /* Set scheduler window size and frame limit. */ 5985 iwn_mem_write(sc, sc->sched_base + 5986 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 5987 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 5988 } 5989 5990 /* Enable interrupts for all our 20 queues. */ 5991 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 5992 /* Identify TX FIFO rings (0-7). */ 5993 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 5994 5995 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 5996 for (qid = 0; qid < 7; qid++) { 5997 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 5998 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 5999 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 6000 } 6001 iwn_nic_unlock(sc); 6002 6003 /* Configure WiMAX coexistence for combo adapters. */ 6004 error = iwn5000_send_wimax_coex(sc); 6005 if (error != 0) { 6006 device_printf(sc->sc_dev, 6007 "%s: could not configure WiMAX coexistence, error %d\n", 6008 __func__, error); 6009 return error; 6010 } 6011 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 6012 /* Perform crystal calibration. */ 6013 error = iwn5000_crystal_calib(sc); 6014 if (error != 0) { 6015 device_printf(sc->sc_dev, 6016 "%s: crystal calibration failed, error %d\n", 6017 __func__, error); 6018 return error; 6019 } 6020 } 6021 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 6022 /* Query calibration from the initialization firmware. 
 */
		if ((error = iwn5000_query_calibration(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not query calibration, error %d\n",
			    __func__, error);
			return error;
		}
		/*
		 * We have the calibration results now, reboot with the
		 * runtime firmware (call ourselves recursively!)
		 */
		iwn_hw_stop(sc);
		error = iwn_hw_init(sc);
	} else {
		/* Send calibration results to runtime firmware. */
		error = iwn5000_send_calibration(sc);
	}
	return error;
}

/*
 * The firmware boot code is small and is intended to be copied directly into
 * the NIC internal memory (no DMA transfer).
 */
static int
iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
{
	int error, ntries;

	/* The BSM transfer counter is in 32-bit words, not bytes. */
	size /= sizeof (uint32_t);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Copy microcode image into NIC memory. */
	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
	    (const uint32_t *)ucode, size);

	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);

	/* Start boot load now. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);

	/* Wait for transfer to complete. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
		    IWN_BSM_WR_CTRL_START))
			break;
		DELAY(10);
	}
	if (ntries == 1000) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		iwn_nic_unlock(sc);
		return ETIMEDOUT;
	}

	/* Enable boot after power up. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);

	iwn_nic_unlock(sc);
	return 0;
}

/*
 * Load the initialization and runtime firmware images on 4965 adapters
 * using the Bootstrap State Machine (BSM).
 */
static int
iwn4965_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy initialization sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->init.text, fw->init.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find initialization sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
	iwn_nic_unlock(sc);

	/* Load firmware boot code. */
	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		return error;
	}
	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);

	/* Wait at most one second for first alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	/* Retrieve current temperature for initial TX power calibration. */
	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
	sc->temp = iwn4965_get_temperature(sc);

	/* Copy runtime sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->main.text, fw->main.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find runtime sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
	    IWN_FW_UPDATED | fw->main.textsz);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Transfer one firmware section to NIC internal memory at address `dst'
 * using the service Flow Handler DMA channel.
 */
static int
iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
    const uint8_t *section, int size)
{
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, size);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_PAUSE);

	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
	    IWN_LOADDR(dma->paddr));
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
	    IWN_HIADDR(dma->paddr) << 28 | size);
	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
	    IWN_FH_TXBUF_STATUS_TFBD_VALID);

	/* Kick Flow Handler to start DMA transfer. */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);

	iwn_nic_unlock(sc);

	/* Wait at most five seconds for FH DMA transfer to complete. */
	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
}

/*
 * Load the .text and .data firmware sections on 5000-series (and newer)
 * adapters, then start execution.
 */
static int
iwn5000_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_part *fw;
	int error;

	/* Load the initialization firmware on first boot only. */
	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
	    &sc->fw.main : &sc->fw.init;

	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
	    fw->text, fw->textsz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".text", error);
		return error;
	}
	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
	    fw->data, fw->datasz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".data", error);
		return error;
	}

	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);
	return 0;
}

/*
 * Extract text and data sections from a legacy firmware image.
 */
static int
iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
{
	const uint32_t *ptr;
	size_t hdrlen = 24;
	uint32_t rev;

	ptr = (const uint32_t *)fw->data;
	rev = le32toh(*ptr++);

	/* Check firmware API version. */
	if (IWN_FW_API(rev) <= 1) {
		device_printf(sc->sc_dev,
		    "%s: bad firmware, need API version >=2\n", __func__);
		return EINVAL;
	}
	if (IWN_FW_API(rev) >= 3) {
		/* Skip build number (version 2 header). */
		hdrlen += 4;
		ptr++;
	}
	if (fw->size < hdrlen) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	fw->main.textsz = le32toh(*ptr++);
	fw->main.datasz = le32toh(*ptr++);
	fw->init.textsz = le32toh(*ptr++);
	fw->init.datasz = le32toh(*ptr++);
	fw->boot.textsz = le32toh(*ptr++);

	/* Check that all firmware sections fit. */
	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}

	/* Get pointers to firmware sections. */
	fw->main.text = (const uint8_t *)ptr;
	fw->main.data = fw->main.text + fw->main.textsz;
	fw->init.text = fw->main.data + fw->main.datasz;
	fw->init.data = fw->init.text + fw->init.textsz;
	fw->boot.text = fw->init.data + fw->init.datasz;
	return 0;
}

/*
 * Extract text and data sections from a TLV firmware image.
 */
static int
iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
    uint16_t alt)
{
	const struct iwn_fw_tlv_hdr *hdr;
	const struct iwn_fw_tlv *tlv;
	const uint8_t *ptr, *end;
	uint64_t altmask;
	uint32_t len, tmp;

	if (fw->size < sizeof (*hdr)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
		    __func__, le32toh(hdr->signature));
		return EINVAL;
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
	    le32toh(hdr->build));

	/*
	 * Select the closest supported alternative that is less than
	 * or equal to the specified one.
	 */
	altmask = le64toh(hdr->altmask);
	while (alt > 0 && !(altmask & (1ULL << alt)))
		alt--;	/* Downgrade. */
	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);

	ptr = (const uint8_t *)(hdr + 1);
	end = (const uint8_t *)(fw->data + fw->size);

	/* Parse type-length-value fields. */
	while (ptr + sizeof (*tlv) <= end) {
		tlv = (const struct iwn_fw_tlv *)ptr;
		len = le32toh(tlv->len);

		ptr += sizeof (*tlv);
		if (ptr + len > end) {
			device_printf(sc->sc_dev,
			    "%s: firmware too short: %zu bytes\n", __func__,
			    fw->size);
			return EINVAL;
		}
		/* Skip other alternatives. */
		if (tlv->alt != 0 && tlv->alt != htole16(alt))
			goto next;

		switch (le16toh(tlv->type)) {
		case IWN_FW_TLV_MAIN_TEXT:
			fw->main.text = ptr;
			fw->main.textsz = len;
			break;
		case IWN_FW_TLV_MAIN_DATA:
			fw->main.data = ptr;
			fw->main.datasz = len;
			break;
		case IWN_FW_TLV_INIT_TEXT:
			fw->init.text = ptr;
			fw->init.textsz = len;
			break;
		case IWN_FW_TLV_INIT_DATA:
			fw->init.data = ptr;
			fw->init.datasz = len;
			break;
		case IWN_FW_TLV_BOOT_TEXT:
			fw->boot.text = ptr;
			fw->boot.textsz = len;
			break;
		case IWN_FW_TLV_ENH_SENS:
			if (!len)
				sc->sc_flags |= IWN_FLAG_ENH_SENS;
			break;
		case IWN_FW_TLV_PHY_CALIB:
			/*
			 * NOTE(review): this dereferences a single byte and
			 * applies htole32 to it; later upstream code uses
			 * le32toh on a full 32-bit load here -- verify.
			 */
			tmp = htole32(*ptr);
			if (tmp < 253) {
				sc->reset_noise_gain = tmp;
				sc->noise_gain = tmp + 1;
			}
			break;
		default:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d not handled\n", le16toh(tlv->type));
			break;
		}
 next:		/* TLV fields are 32-bit aligned. */
		ptr += (len + 3) & ~3;
	}
	return 0;
}

/*
 * Read the firmware image from the filesystem with firmware(9) and parse
 * its sections (legacy or TLV format), validating section sizes against
 * the hardware limits.
 */
static int
iwn_read_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	int error;

	/* firmware_get() may sleep; drop the driver lock around it. */
	IWN_UNLOCK(sc);

	memset(fw, 0, sizeof (*fw));

	/* Read firmware image from filesystem. */
	sc->fw_fp = firmware_get(sc->fwname);
	if (sc->fw_fp == NULL) {
		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
		    __func__, sc->fwname);
		IWN_LOCK(sc);
		return EINVAL;
	}
	IWN_LOCK(sc);

	fw->size = sc->fw_fp->datasize;
	fw->data = (const uint8_t *)sc->fw_fp->data;
	if (fw->size < sizeof (uint32_t)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* Retrieve text and data sections. */
	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
		error = iwn_read_firmware_leg(sc, fw);
	else
		error = iwn_read_firmware_tlv(sc, fw, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware sections, error %d\n",
		    __func__, error);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return error;
	}

	/* Make sure text and data sections fit in hardware memory. */
	if (fw->main.textsz > sc->fw_text_maxsz ||
	    fw->main.datasz > sc->fw_data_maxsz ||
	    fw->init.textsz > sc->fw_text_maxsz ||
	    fw->init.datasz > sc->fw_data_maxsz ||
	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
	    (fw->boot.textsz & 3) != 0) {
		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
		    __func__);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* We can proceed with loading the firmware. */
	return 0;
}

/*
 * Set the "initialization complete" bit and busy-wait for the MAC clock
 * to become ready.
 */
static int
iwn_clock_wait(struct iwn_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization. */
	for (ntries = 0; ntries < 2500; ntries++) {
		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
			return 0;
		DELAY(10);
	}
	device_printf(sc->sc_dev,
	    "%s: timeout waiting for clock stabilization\n", __func__);
	return ETIMEDOUT;
}

/*
 * Power the adapter on: apply hardware bug workarounds, wait for the
 * clocks to stabilize and enable the DMA (and, on 4965, BSM) clocks.
 */
static int
iwn_apm_init(struct iwn_softc *sc)
{
	uint32_t reg;
	int error;

	/* Disable L0s exit timer (NMI bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
	/* Don't wait for ICH L0s (ICH bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);

	/* Set FH wait threshold to max (HW bug under stress workaround). */
	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);

	/* Enable HAP INTA to move adapter from L1a to L0s. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);

	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & 0x02)	/* L1 Entry enabled. */
		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
	else
		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);

	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Enable DMA and BSM (Bootstrap State Machine). */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
	} else {
		/* Enable DMA. */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
	}
	DELAY(20);
	/* Disable L1-Active. */
	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Stop busmaster DMA activity and wait for the master-disabled status bit.
 */
static void
iwn_apm_stop_master(struct iwn_softc *sc)
{
	int ntries;

	/* Stop busmaster DMA activity. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
	for (ntries = 0; ntries < 100; ntries++) {
		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
			return;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}

/*
 * Power the adapter off: stop the bus master and issue a software reset.
 */
static void
iwn_apm_stop(struct iwn_softc *sc)
{
	iwn_apm_stop_master(sc);

	/* Reset the entire device. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
	DELAY(10);
	/* Clear "initialization complete" bit. */
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
}

/*
 * Adapter-specific NIC configuration for 4965.
 */
static int
iwn4965_nic_config(struct iwn_softc *sc)
{
	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
		/*
		 * I don't believe this to be correct but this is what the
		 * vendor driver is doing. Probably the bits should not be
		 * shifted in IWN_RFCFG_*.
		 */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
	return 0;
}

/*
 * Adapter-specific NIC configuration for 5000-series and newer.
 */
static int
iwn5000_nic_config(struct iwn_softc *sc)
{
	uint32_t tmp;
	int error;

	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);

	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/*
		 * Select first Switching Voltage Regulator (1.32V) to
		 * solve a stability issue related to noisy DC2DC line
		 * in the silicon of 1000 Series.
		 */
		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
	}
	iwn_nic_unlock(sc);

	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
		/* Use internal power amplifier only. */
		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
	}
	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
	    sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
		/* Indicate that ROM calibration version is >=6. */
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
	}
	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
	return 0;
}

/*
 * Take NIC ownership over Intel Active Management Technology (AMT).
 */
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/*
 * Bring the hardware up: power it on, program the RX/TX rings and DMA
 * channels, load the firmware and wait for the "alive" notification.
 */
static int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA           |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits. */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing. */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}
	/* Do post-firmware initialization. */
	return ops->post_alive(sc);
}

/*
 * Shut the hardware down: disable interrupts, stop the TX scheduler,
 * DMA channels and rings, then power the adapter off.
 */
static void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels. */
	if (iwn_nic_lock(sc) == 0) {
		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
			for (ntries = 0; ntries < 200; ntries++) {
				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
				    IWN_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		iwn_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwn_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwn_nic_lock(sc) == 0) {
		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
		iwn_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter. */
	iwn_apm_stop(sc);
}

/*
 * Task callback: the RF kill switch was released; restart the interface.
 */
static void
iwn_radio_on(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	if (vap != NULL) {
		iwn_init(sc);
		ieee80211_init(vap);
	}
}

/*
 * Task callback: the RF kill switch was engaged; stop the interface but
 * keep interrupts enabled so we notice when the switch is released again.
 */
static void
iwn_radio_off(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	iwn_stop(sc);
	if (vap != NULL)
		ieee80211_stop(vap);

	/* Enable interrupts to get RF toggle notification. */
	IWN_LOCK(sc);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
	IWN_UNLOCK(sc);
}

/*
 * Bring the interface up with the driver lock held: prepare the hardware,
 * read and upload the firmware and configure the adapter.
 */
static void
iwn_init_locked(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error;

	IWN_LOCK_ASSERT(sc);

	if ((error = iwn_hw_prepare(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Initialize interrupt mask to default value. */
	sc->int_mask = IWN_INT_MASK_DEF;
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Check that the radio is not disabled by hardware switch. */
	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		/* Enable interrupts to get RF toggle notifications. */
		IWN_WRITE(sc, IWN_INT, 0xffffffff);
		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
		return;
	}

	/* Read firmware images from the filesystem. */
	if ((error = iwn_read_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Initialize hardware and upload firmware. */
	error = iwn_hw_init(sc);
	/* The firmware image is no longer needed once it has been uploaded. */
	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
	sc->fw_fp = NULL;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not initialize hardware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Configure adapter now that it is ready. */
	if ((error = iwn_config(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure device, error %d\n", __func__,
		    error);
		goto fail;
	}

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
	return;

fail:	iwn_stop_locked(sc);
}

/*
 * ifnet init entry point: bring the interface up and kick net80211.
 */
static void
iwn_init(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	IWN_LOCK(sc);
	iwn_init_locked(sc);
	IWN_UNLOCK(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ieee80211_start_all(ic);
}

/*
 * Bring the interface down with the driver lock held.
 */
static void
iwn_stop_locked(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	IWN_LOCK_ASSERT(sc);

	sc->sc_tx_timer = 0;
	callout_stop(&sc->watchdog_to);
	callout_stop(&sc->calib_to);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Power OFF hardware. */
	iwn_hw_stop(sc);
}

/*
 * Locked wrapper around iwn_stop_locked().
 */
static void
iwn_stop(struct iwn_softc *sc)
{
	IWN_LOCK(sc);
	iwn_stop_locked(sc);
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to start a scan.
 */
static void
iwn_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;

	IWN_LOCK(sc);
	/* make the link LED blink while we're scanning */
	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to terminate a scan.
 */
static void
iwn_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWN_LOCK(sc);
	if (vap->iv_state == IEEE80211_S_RUN) {
		/* Set link LED to ON status if we are associated */
		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
	}
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to force a channel change.
 */
static void
iwn_set_channel(struct ieee80211com *ic)
{
	const struct ieee80211_channel *c = ic->ic_curchan;
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	int error;

	IWN_LOCK(sc);
	/* Keep the radiotap headers in sync with the current channel. */
	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);

	/*
	 * Only need to set the channel in Monitor mode. AP scanning and auth
	 * are already taken care of by their respective firmware commands.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		error = iwn_config(sc);
		if (error != 0)
			device_printf(sc->sc_dev,
			    "%s: error %d settting channel\n", __func__, error);
	}
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to start scanning of the current channel.
 */
static void
iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	struct ieee80211vap *vap = ss->ss_vap;
	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	int error;

	IWN_LOCK(sc);
	error = iwn_scan(sc);
	IWN_UNLOCK(sc);
	if (error != 0)
		ieee80211_cancel_scan(vap);
}

/*
 * Callback from net80211 to handle the minimum dwell time being met.
 * The intent is to terminate the scan but we just let the firmware
 * notify us when it's finished as we have no safe way to abort it.
 */
static void
iwn_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* NB: don't try to abort scan; wait for firmware to finish */
}

/*
 * Task callback: restart the hardware from scratch after a fatal error.
 */
static void
iwn_hw_reset(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_stop(sc);
	iwn_init(sc);
	ieee80211_notify_radio(ic, 1);
}