/*-
 * Copyright (c) 2007-2009
 *	Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2008
 *	Benjamin Close <benjsc@FreeBSD.org>
 * Copyright (c) 2008 Sam Leffler, Errno Consulting
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>

#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>

/* PCI vendor/device id to marketing-name mapping, used by iwn_probe(). */
struct iwn_ident {
	uint16_t	vendor;
	uint16_t	device;
	const char	*name;
};

static const struct iwn_ident iwn_ident_table[] = {
	{ 0x8086, 0x0082, "Intel Centrino Advanced-N 6205"		},
	{ 0x8086, 0x0083, "Intel Centrino Wireless-N 1000"		},
	{ 0x8086, 0x0084, "Intel Centrino Wireless-N 1000"		},
	{ 0x8086, 0x0085, "Intel Centrino Advanced-N 6205"		},
	{ 0x8086, 0x0087, "Intel Centrino Advanced-N + WiMAX 6250"	},
	{ 0x8086, 0x0089, "Intel Centrino Advanced-N + WiMAX 6250"	},
	{ 0x8086, 0x008a, "Intel Centrino Wireless-N 1030"		},
	{ 0x8086, 0x008b, "Intel Centrino Wireless-N 1030"		},
	{ 0x8086, 0x0090, "Intel Centrino Advanced-N 6230"		},
	{ 0x8086, 0x0091, "Intel Centrino Advanced-N 6230"		},
	{ 0x8086, 0x0885, "Intel Centrino Wireless-N + WiMAX 6150"	},
	{ 0x8086, 0x0886, "Intel Centrino Wireless-N + WiMAX 6150"	},
	{ 0x8086, 0x0887, "Intel Centrino Wireless-N 2230"		},
	{ 0x8086, 0x0888, "Intel Centrino Wireless-N 2230"		},
	{ 0x8086, 0x0896, "Intel Centrino Wireless-N 130"		},
	{ 0x8086, 0x0897, "Intel Centrino Wireless-N 130"		},
	{ 0x8086, 0x08ae, "Intel Centrino Wireless-N 100"		},
	{ 0x8086, 0x08af, "Intel Centrino Wireless-N 100"		},
	{ 0x8086, 0x4229, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, 0x422b, "Intel Centrino Ultimate-N 6300"		},
	{ 0x8086, 0x422c, "Intel Centrino Advanced-N 6200"		},
	{ 0x8086, 0x422d, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, 0x4230, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, 0x4232, "Intel WiFi Link 5100"			},
	{ 0x8086, 0x4233, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, 0x4235, "Intel Ultimate N WiFi Link 5300"		},
	{ 0x8086, 0x4236, "Intel Ultimate N WiFi Link 5300"		},
	{ 0x8086, 0x4237, "Intel WiFi Link 5100"			},
	{ 0x8086, 0x4238, "Intel Centrino Ultimate-N 6300"		},
	{ 0x8086, 0x4239, "Intel Centrino Advanced-N 6200"		},
	{ 0x8086, 0x423a, "Intel WiMAX/WiFi Link 5350"			},
	{ 0x8086, 0x423b, "Intel WiMAX/WiFi Link 5350"			},
	{ 0x8086, 0x423c, "Intel WiMAX/WiFi Link 5150"			},
	{ 0x8086, 0x423d, "Intel WiMAX/WiFi Link 5150"			},
	{ 0, 0, NULL }			/* sentinel; terminates probe loop */
};

/* Forward declarations for all file-local functions. */
static int	iwn_probe(device_t);
static int	iwn_attach(device_t);
static int	iwn4965_attach(struct iwn_softc *, uint16_t);
static int	iwn5000_attach(struct iwn_softc *, uint16_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static void	iwn_sysctlattach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_detach(device_t);
static int	iwn_shutdown(device_t);
static int	iwn_suspend(device_t);
static int	iwn_resume(device_t);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
static int	iwn_read_eeprom(struct iwn_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static void	iwn4965_read_eeprom(struct iwn_softc *);
static void	iwn4965_print_power_group(struct iwn_softc *, int);
static void	iwn5000_read_eeprom(struct iwn_softc *);
static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
static void	iwn_read_eeprom_band(struct iwn_softc *, int);
static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
		    struct ieee80211_channel *);
static int	iwn_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel[]);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwn_calib_timeout(void *);
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_rx_calib_results(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_rftoggle_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static void	iwn_intr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *);
static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *,
		    const struct ieee80211_bpf_params *params);
static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static void	iwn_start(struct ifnet *);
static void	iwn_start_locked(struct ifnet *);
static void	iwn_watchdog(void *);
static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *,
		    struct ieee80211_node *);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static int	iwn_updateedca(struct ieee80211com *);
static void	iwn_update_mcast(struct ifnet *);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn5000_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn_send_btcoex(struct iwn_softc *);
static int	iwn_send_advanced_btcoex(struct iwn_softc *);
static int	iwn5000_runtime_calib(struct iwn_softc *);
static int	iwn_config(struct iwn_softc *);
static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
static int	iwn_scan(struct iwn_softc *);
static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_ampdu_rx_start(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *, int, int, int);
static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *);
static int	iwn_addba_request(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_addba_response(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
static int	iwn5000_query_calibration(struct iwn_softc *);
static int	iwn5000_send_calibration(struct iwn_softc *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn5000_crystal_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calib(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static void	iwn_radio_on(void *, int);
static void	iwn_radio_off(void *, int);
static void	iwn_init_locked(struct iwn_softc *);
static void	iwn_init(void *);
static void	iwn_stop_locked(struct iwn_softc *);
static void	iwn_stop(struct iwn_softc *);
static void	iwn_scan_start(struct ieee80211com *);
static void	iwn_scan_end(struct ieee80211com *);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
static void	iwn_hw_reset(void *, int);

#define IWN_DEBUG
#ifdef IWN_DEBUG
/* Bitmask values for the "debug" sysctl/tunable (sc->sc_debug). */
enum {
	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
	IWN_DEBUG_BEACON	= 0x00000040,	/* beacon handling */
	IWN_DEBUG_WATCHDOG	= 0x00000080,	/* watchdog timeout */
	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
	IWN_DEBUG_LED		= 0x00000800,	/* led management */
	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
	IWN_DEBUG_TXRATE	= 0x00002000,	/* TX rate debugging */
	IWN_DEBUG_PWRSAVE	= 0x00004000,	/* Power save operations */
	IWN_DEBUG_TRACE		= 0x40000000,	/* Print begin and start driver function */
	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	IWN_DEBUG_ANY		= 0xffffffff
};

#define DPRINTF(sc, m, fmt, ...) \
do {						\
	if (sc->sc_debug & (m))			\
		printf(fmt, __VA_ARGS__);	\
} while (0)

/*
 * Map a firmware notification/command opcode to a printable name
 * (debug builds only; used by DPRINTF callers in the notification path).
 */
static const char *
iwn_intr_str(uint8_t cmd)
{
	switch (cmd) {
	/* Notifications */
	case IWN_UC_READY:		return "UC_READY";
	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
	case IWN_TX_DONE:		return "TX_DONE";
	case IWN_START_SCAN:		return "START_SCAN";
	case IWN_STOP_SCAN:		return "STOP_SCAN";
	case IWN_RX_STATISTICS:		return "RX_STATS";
	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
	case IWN_RX_PHY:		return "RX_PHY";
	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
	case IWN_RX_DONE:		return "RX_DONE";

	/* Command Notifications */
	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
391 } 392 return "UNKNOWN INTR NOTIF/CMD"; 393 } 394 #else 395 #define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0) 396 #endif 397 398 static device_method_t iwn_methods[] = { 399 /* Device interface */ 400 DEVMETHOD(device_probe, iwn_probe), 401 DEVMETHOD(device_attach, iwn_attach), 402 DEVMETHOD(device_detach, iwn_detach), 403 DEVMETHOD(device_shutdown, iwn_shutdown), 404 DEVMETHOD(device_suspend, iwn_suspend), 405 DEVMETHOD(device_resume, iwn_resume), 406 { 0, 0 } 407 }; 408 409 static driver_t iwn_driver = { 410 "iwn", 411 iwn_methods, 412 sizeof(struct iwn_softc) 413 }; 414 static devclass_t iwn_devclass; 415 416 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0); 417 418 MODULE_VERSION(iwn, 1); 419 420 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 421 MODULE_DEPEND(iwn, pci, 1, 1, 1); 422 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 423 424 static int 425 iwn_probe(device_t dev) 426 { 427 const struct iwn_ident *ident; 428 429 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 430 if (pci_get_vendor(dev) == ident->vendor && 431 pci_get_device(dev) == ident->device) { 432 device_set_desc(dev, ident->name); 433 return 0; 434 } 435 } 436 return ENXIO; 437 } 438 439 static int 440 iwn_attach(device_t dev) 441 { 442 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 443 struct ieee80211com *ic; 444 struct ifnet *ifp; 445 uint32_t reg; 446 int i, error, result; 447 uint8_t macaddr[IEEE80211_ADDR_LEN]; 448 449 sc->sc_dev = dev; 450 451 #ifdef IWN_DEBUG 452 error = resource_int_value(device_get_name(sc->sc_dev), 453 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 454 if (error != 0) 455 sc->sc_debug = 0; 456 #else 457 sc->sc_debug = 0; 458 #endif 459 460 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); 461 462 /* 463 * Get the offset of the PCI Express Capability Structure in PCI 464 * Configuration Space. 
465 */ 466 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 467 if (error != 0) { 468 device_printf(dev, "PCIe capability structure not found!\n"); 469 return error; 470 } 471 472 /* Clear device-specific "PCI retry timeout" register (41h). */ 473 pci_write_config(dev, 0x41, 0, 1); 474 475 /* Hardware bug workaround. */ 476 reg = pci_read_config(dev, PCIR_COMMAND, 1); 477 if (reg & PCIM_CMD_INTxDIS) { 478 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 479 __func__); 480 reg &= ~PCIM_CMD_INTxDIS; 481 pci_write_config(dev, PCIR_COMMAND, reg, 1); 482 } 483 484 /* Enable bus-mastering. */ 485 pci_enable_busmaster(dev); 486 487 sc->mem_rid = PCIR_BAR(0); 488 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 489 RF_ACTIVE); 490 if (sc->mem == NULL) { 491 device_printf(dev, "can't map mem space\n"); 492 error = ENOMEM; 493 return error; 494 } 495 sc->sc_st = rman_get_bustag(sc->mem); 496 sc->sc_sh = rman_get_bushandle(sc->mem); 497 498 sc->irq_rid = 0; 499 if ((result = pci_msi_count(dev)) == 1 && 500 pci_alloc_msi(dev, &result) == 0) 501 sc->irq_rid = 1; 502 /* Install interrupt handler. */ 503 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, 504 RF_ACTIVE | RF_SHAREABLE); 505 if (sc->irq == NULL) { 506 device_printf(dev, "can't map interrupt\n"); 507 error = ENOMEM; 508 goto fail; 509 } 510 511 IWN_LOCK_INIT(sc); 512 513 /* Read hardware revision and attach. */ 514 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf; 515 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 516 error = iwn4965_attach(sc, pci_get_device(dev)); 517 else 518 error = iwn5000_attach(sc, pci_get_device(dev)); 519 if (error != 0) { 520 device_printf(dev, "could not attach device, error %d\n", 521 error); 522 goto fail; 523 } 524 525 if ((error = iwn_hw_prepare(sc)) != 0) { 526 device_printf(dev, "hardware not ready, error %d\n", error); 527 goto fail; 528 } 529 530 /* Allocate DMA memory for firmware transfers. 
*/ 531 if ((error = iwn_alloc_fwmem(sc)) != 0) { 532 device_printf(dev, 533 "could not allocate memory for firmware, error %d\n", 534 error); 535 goto fail; 536 } 537 538 /* Allocate "Keep Warm" page. */ 539 if ((error = iwn_alloc_kw(sc)) != 0) { 540 device_printf(dev, 541 "could not allocate keep warm page, error %d\n", error); 542 goto fail; 543 } 544 545 /* Allocate ICT table for 5000 Series. */ 546 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 547 (error = iwn_alloc_ict(sc)) != 0) { 548 device_printf(dev, "could not allocate ICT table, error %d\n", 549 error); 550 goto fail; 551 } 552 553 /* Allocate TX scheduler "rings". */ 554 if ((error = iwn_alloc_sched(sc)) != 0) { 555 device_printf(dev, 556 "could not allocate TX scheduler rings, error %d\n", error); 557 goto fail; 558 } 559 560 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 561 for (i = 0; i < sc->ntxqs; i++) { 562 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 563 device_printf(dev, 564 "could not allocate TX ring %d, error %d\n", i, 565 error); 566 goto fail; 567 } 568 } 569 570 /* Allocate RX ring. */ 571 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 572 device_printf(dev, "could not allocate RX ring, error %d\n", 573 error); 574 goto fail; 575 } 576 577 /* Clear pending interrupts. */ 578 IWN_WRITE(sc, IWN_INT, 0xffffffff); 579 580 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 581 if (ifp == NULL) { 582 device_printf(dev, "can not allocate ifnet structure\n"); 583 goto fail; 584 } 585 586 ic = ifp->if_l2com; 587 ic->ic_ifp = ifp; 588 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 589 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 590 591 /* Set device capabilities. 
*/ 592 ic->ic_caps = 593 IEEE80211_C_STA /* station mode supported */ 594 | IEEE80211_C_MONITOR /* monitor mode supported */ 595 | IEEE80211_C_BGSCAN /* background scanning */ 596 | IEEE80211_C_TXPMGT /* tx power management */ 597 | IEEE80211_C_SHSLOT /* short slot time supported */ 598 | IEEE80211_C_WPA 599 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 600 #if 0 601 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 602 #endif 603 | IEEE80211_C_WME /* WME */ 604 | IEEE80211_C_PMGT /* Station-side power mgmt */ 605 ; 606 607 /* Read MAC address, channels, etc from EEPROM. */ 608 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 609 device_printf(dev, "could not read EEPROM, error %d\n", 610 error); 611 goto fail; 612 } 613 614 /* Count the number of available chains. */ 615 sc->ntxchains = 616 ((sc->txchainmask >> 2) & 1) + 617 ((sc->txchainmask >> 1) & 1) + 618 ((sc->txchainmask >> 0) & 1); 619 sc->nrxchains = 620 ((sc->rxchainmask >> 2) & 1) + 621 ((sc->rxchainmask >> 1) & 1) + 622 ((sc->rxchainmask >> 0) & 1); 623 if (bootverbose) { 624 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", 625 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 626 macaddr, ":"); 627 } 628 629 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 630 ic->ic_rxstream = sc->nrxchains; 631 ic->ic_txstream = sc->ntxchains; 632 ic->ic_htcaps = 633 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 634 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 635 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 636 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 637 #ifdef notyet 638 | IEEE80211_HTCAP_GREENFIELD 639 #if IWN_RBUF_SIZE == 8192 640 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 641 #else 642 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 643 #endif 644 #endif 645 /* s/w capabilities */ 646 | IEEE80211_HTC_HT /* HT operation */ 647 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 648 #ifdef notyet 649 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 650 #endif 651 ; 652 } 653 654 
if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 655 ifp->if_softc = sc; 656 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 657 ifp->if_init = iwn_init; 658 ifp->if_ioctl = iwn_ioctl; 659 ifp->if_start = iwn_start; 660 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 661 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 662 IFQ_SET_READY(&ifp->if_snd); 663 664 ieee80211_ifattach(ic, macaddr); 665 ic->ic_vap_create = iwn_vap_create; 666 ic->ic_vap_delete = iwn_vap_delete; 667 ic->ic_raw_xmit = iwn_raw_xmit; 668 ic->ic_node_alloc = iwn_node_alloc; 669 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 670 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 671 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 672 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 673 sc->sc_addba_request = ic->ic_addba_request; 674 ic->ic_addba_request = iwn_addba_request; 675 sc->sc_addba_response = ic->ic_addba_response; 676 ic->ic_addba_response = iwn_addba_response; 677 sc->sc_addba_stop = ic->ic_addba_stop; 678 ic->ic_addba_stop = iwn_ampdu_tx_stop; 679 ic->ic_newassoc = iwn_newassoc; 680 ic->ic_wme.wme_update = iwn_updateedca; 681 ic->ic_update_mcast = iwn_update_mcast; 682 ic->ic_scan_start = iwn_scan_start; 683 ic->ic_scan_end = iwn_scan_end; 684 ic->ic_set_channel = iwn_set_channel; 685 ic->ic_scan_curchan = iwn_scan_curchan; 686 ic->ic_scan_mindwell = iwn_scan_mindwell; 687 ic->ic_setregdomain = iwn_setregdomain; 688 689 iwn_radiotap_attach(sc); 690 691 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); 692 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); 693 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc); 694 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); 695 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); 696 697 iwn_sysctlattach(sc); 698 699 /* 700 * Hook our interrupt after all initialization is complete. 
701 */ 702 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 703 NULL, iwn_intr, sc, &sc->sc_ih); 704 if (error != 0) { 705 device_printf(dev, "can't establish interrupt, error %d\n", 706 error); 707 goto fail; 708 } 709 710 if (bootverbose) 711 ieee80211_announce(ic); 712 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 713 return 0; 714 fail: 715 iwn_detach(dev); 716 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 717 return error; 718 } 719 720 static int 721 iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 722 { 723 struct iwn_ops *ops = &sc->ops; 724 725 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 726 ops->load_firmware = iwn4965_load_firmware; 727 ops->read_eeprom = iwn4965_read_eeprom; 728 ops->post_alive = iwn4965_post_alive; 729 ops->nic_config = iwn4965_nic_config; 730 ops->update_sched = iwn4965_update_sched; 731 ops->get_temperature = iwn4965_get_temperature; 732 ops->get_rssi = iwn4965_get_rssi; 733 ops->set_txpower = iwn4965_set_txpower; 734 ops->init_gains = iwn4965_init_gains; 735 ops->set_gains = iwn4965_set_gains; 736 ops->add_node = iwn4965_add_node; 737 ops->tx_done = iwn4965_tx_done; 738 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 739 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 740 sc->ntxqs = IWN4965_NTXQUEUES; 741 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; 742 sc->ndmachnls = IWN4965_NDMACHNLS; 743 sc->broadcast_id = IWN4965_ID_BROADCAST; 744 sc->rxonsz = IWN4965_RXONSZ; 745 sc->schedsz = IWN4965_SCHEDSZ; 746 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 747 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 748 sc->fwsz = IWN4965_FWSZ; 749 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 750 sc->limits = &iwn4965_sensitivity_limits; 751 sc->fwname = "iwn4965fw"; 752 /* Override chains masks, ROM is known to be broken. 
 */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;

	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);

	return 0;
}

/*
 * Fill in the ops vector and softc parameters for all 5000-style adapters
 * (1000/5000/6000 Series).  Per-revision sensitivity limits, firmware image
 * names and chain-mask overrides are selected from sc->hw_type; "pid" (the
 * PCI device id) further distinguishes 6200/6205 variants.  Returns ENOTSUP
 * for unrecognized hardware revisions.
 */
static int
iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;

	switch (sc->hw_type) {
	case IWN_HW_REV_TYPE_5100:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwn5000fw";
		/* Override chains masks, ROM is known to be broken. */
		sc->txchainmask = IWN_ANT_B;
		sc->rxchainmask = IWN_ANT_AB;
		break;
	case IWN_HW_REV_TYPE_5150:
		sc->limits = &iwn5150_sensitivity_limits;
		sc->fwname = "iwn5150fw";
		break;
	case IWN_HW_REV_TYPE_5300:
	case IWN_HW_REV_TYPE_5350:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->fwname = "iwn5000fw";
		break;
	case IWN_HW_REV_TYPE_1000:
		sc->limits = &iwn1000_sensitivity_limits;
		sc->fwname = "iwn1000fw";
		break;
	case IWN_HW_REV_TYPE_6000:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwn6000fw";
		if (pid == 0x422c || pid == 0x4239) {
			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
			/* Override chains masks, ROM is known to be broken. */
			sc->txchainmask = IWN_ANT_BC;
			sc->rxchainmask = IWN_ANT_BC;
		}
		break;
	case IWN_HW_REV_TYPE_6050:
		sc->limits = &iwn6000_sensitivity_limits;
		sc->fwname = "iwn6050fw";
		/* Override chains masks, ROM is known to be broken. */
		sc->txchainmask = IWN_ANT_AB;
		sc->rxchainmask = IWN_ANT_AB;
		break;
	case IWN_HW_REV_TYPE_6005:
		sc->limits = &iwn6000_sensitivity_limits;
		/* 6205 (0x0082/0x0085) lacks advanced bluetooth coexistence. */
		if (pid != 0x0082 && pid != 0x0085) {
			sc->fwname = "iwn6000g2bfw";
			sc->sc_flags |= IWN_FLAG_ADV_BTCOEX;
		} else
			sc->fwname = "iwn6000g2afw";
		break;
	default:
		device_printf(sc->sc_dev, "adapter type %d not supported\n",
		    sc->hw_type);
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
		return ENOTSUP;
	}
	return 0;
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
	/* Register our TX/RX radiotap headers with net80211. */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWN_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWN_RX_RADIOTAP_PRESENT);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/* Create the per-device sysctl node(s) (debug knob, IWN_DEBUG kernels only). */
static void
iwn_sysctlattach(struct iwn_softc *sc)
{
#ifdef IWN_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
	    "control debugging printfs");
#endif
}

/*
 * net80211 vap_create method: allocate and set up our vap.
 * The driver supports only a single vap at a time.
 */
static struct ieee80211vap *
iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwn_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;
	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (ivp == NULL)
		return NULL;
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods; save the old one for chaining. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwn_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
	ic->ic_opmode = opmode;
	return vap;
}

/* net80211 vap_delete method: undo iwn_vap_create(). */
static void
iwn_vap_delete(struct ieee80211vap *vap)
{
	struct iwn_vap *ivp = IWN_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

/*
 * Device detach: stop the hardware, tear down net80211 state and
 * release the interrupt and all DMA/bus resources.
 */
static int
iwn_detach(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic;
	int qid;

	if (ifp != NULL) {
		ic = ifp->if_l2com;

		/* Make sure no deferred tasks are still outstanding. */
		ieee80211_draintask(ic, &sc->sc_reinit_task);
		ieee80211_draintask(ic, &sc->sc_radioon_task);
		ieee80211_draintask(ic, &sc->sc_radiooff_task);

		iwn_stop(sc);
		callout_drain(&sc->watchdog_to);
		callout_drain(&sc->calib_to);
		ieee80211_ifdetach(ic);
	}

	/* Uninstall interrupt handler. */
	if (sc->irq != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
		/* rid 1 means MSI was allocated at attach time. */
		if (sc->irq_rid == 1)
			pci_release_msi(dev);
	}

	/* Free DMA resources.
 */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	if (ifp != NULL)
		if_free(ifp);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s done\n", __func__);
	IWN_LOCK_DESTROY(sc);
	return 0;
}

/* Quiesce the hardware on system shutdown. */
static int
iwn_shutdown(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	iwn_stop(sc);
	return 0;
}

/* Suspend: let net80211 bring all vaps down. */
static int
iwn_suspend(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	ieee80211_suspend_all(ic);
	return 0;
}

/* Resume: restore PCI config state and bring all vaps back up. */
static int
iwn_resume(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	ieee80211_resume_all(ic);
	return 0;
}

/*
 * Request exclusive access to the NIC (required before touching the
 * indirect prph/mem registers).  Returns 0 on success or ETIMEDOUT if
 * the MAC did not signal access within ~10ms.
 */
static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		/* Access granted only when ENA is set and SLEEP is clear. */
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/* Drop the access request taken by iwn_nic_lock(). */
static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

/* Indirect read of a periphery (prph) register. */
static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

/* Indirect write of a periphery (prph) register. */
static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

/* Read-modify-write helpers for prph registers. */
static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

/* Write `count' consecutive 32-bit prph registers starting at `addr'. */
static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

/* Indirect read of a 32-bit word of NIC internal memory. */
static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

/* Indirect write of a 32-bit word of NIC internal memory. */
static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

/*
 * 16-bit write to NIC memory: read the enclosing 32-bit word, merge the
 * halfword into the half selected by addr's low bits, and write it back.
 */
static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}

/* Read `count' consecutive 32-bit words of NIC memory into `data'. */
static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

/* Fill `count' consecutive 32-bit words of NIC memory with `val'. */
static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

/*
 * Request exclusive access to the (OTP)ROM.  Returns 0 on success or
 * ETIMEDOUT if the lock could not be obtained.
 */
static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	/* Retry the whole request a number of times. */
	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
	return ETIMEDOUT;
}

/* Release the lock taken by iwn_eeprom_lock(). */
static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Wait for clock stabilization before accessing prph.
*/ 1148 if ((error = iwn_clock_wait(sc)) != 0) 1149 return error; 1150 1151 if ((error = iwn_nic_lock(sc)) != 0) 1152 return error; 1153 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1154 DELAY(5); 1155 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1156 iwn_nic_unlock(sc); 1157 1158 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1159 if (sc->hw_type != IWN_HW_REV_TYPE_1000) { 1160 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1161 IWN_RESET_LINK_PWR_MGMT_DIS); 1162 } 1163 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1164 /* Clear ECC status. */ 1165 IWN_SETBITS(sc, IWN_OTP_GP, 1166 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1167 1168 /* 1169 * Find the block before last block (contains the EEPROM image) 1170 * for HW without OTP shadow RAM. 1171 */ 1172 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 1173 /* Switch to absolute addressing mode. */ 1174 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1175 base = prev = 0; 1176 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) { 1177 error = iwn_read_prom_data(sc, base, &next, 2); 1178 if (error != 0) 1179 return error; 1180 if (next == 0) /* End of linked-list. */ 1181 break; 1182 prev = base; 1183 base = le16toh(next); 1184 } 1185 if (count == 0 || count == IWN1000_OTP_NBLOCKS) 1186 return EIO; 1187 /* Skip "next" word. 
*/ 1188 sc->prom_base = prev + 1; 1189 } 1190 1191 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1192 1193 return 0; 1194 } 1195 1196 static int 1197 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1198 { 1199 uint8_t *out = data; 1200 uint32_t val, tmp; 1201 int ntries; 1202 1203 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1204 1205 addr += sc->prom_base; 1206 for (; count > 0; count -= 2, addr++) { 1207 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1208 for (ntries = 0; ntries < 10; ntries++) { 1209 val = IWN_READ(sc, IWN_EEPROM); 1210 if (val & IWN_EEPROM_READ_VALID) 1211 break; 1212 DELAY(5); 1213 } 1214 if (ntries == 10) { 1215 device_printf(sc->sc_dev, 1216 "timeout reading ROM at 0x%x\n", addr); 1217 return ETIMEDOUT; 1218 } 1219 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1220 /* OTPROM, check for ECC errors. */ 1221 tmp = IWN_READ(sc, IWN_OTP_GP); 1222 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1223 device_printf(sc->sc_dev, 1224 "OTPROM ECC error at 0x%x\n", addr); 1225 return EIO; 1226 } 1227 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1228 /* Correctable ECC error, clear bit. 
 */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		/* Data word lives in the upper 16 bits of the register. */
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/* bus_dmamap_load callback: record the single segment's bus address. */
static void
iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a contiguous, zeroed, coherent DMA area of `size' bytes
 * aligned to `alignment'.  On success the kernel virtual address is
 * optionally returned through `kvap'; on failure everything allocated
 * so far is released.
 */
static int
iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

/* Release a DMA area allocated by iwn_dma_contig_alloc(). */
static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}

static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
	    sc->schedsz, 1024);
}

static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
}

static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
	    IWN_ICT_SIZE, 4096);
}

static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
}

static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

/*
 * Allocate the RX ring: descriptor array, status area and one mapped
 * mbuf per slot.  On failure the ring is freed before returning.
 */
static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Allocate RX descriptors (256-byte aligned).
*/ 1373 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1374 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1375 size, 256); 1376 if (error != 0) { 1377 device_printf(sc->sc_dev, 1378 "%s: could not allocate RX ring DMA memory, error %d\n", 1379 __func__, error); 1380 goto fail; 1381 } 1382 1383 /* Allocate RX status area (16-byte aligned). */ 1384 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1385 sizeof (struct iwn_rx_status), 16); 1386 if (error != 0) { 1387 device_printf(sc->sc_dev, 1388 "%s: could not allocate RX status DMA memory, error %d\n", 1389 __func__, error); 1390 goto fail; 1391 } 1392 1393 /* Create RX buffer DMA tag. */ 1394 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1395 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1396 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL, 1397 &ring->data_dmat); 1398 if (error != 0) { 1399 device_printf(sc->sc_dev, 1400 "%s: could not create RX buf DMA tag, error %d\n", 1401 __func__, error); 1402 goto fail; 1403 } 1404 1405 /* 1406 * Allocate and map RX buffers. 
1407 */ 1408 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1409 struct iwn_rx_data *data = &ring->data[i]; 1410 bus_addr_t paddr; 1411 1412 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1413 if (error != 0) { 1414 device_printf(sc->sc_dev, 1415 "%s: could not create RX buf DMA map, error %d\n", 1416 __func__, error); 1417 goto fail; 1418 } 1419 1420 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1421 IWN_RBUF_SIZE); 1422 if (data->m == NULL) { 1423 device_printf(sc->sc_dev, 1424 "%s: could not allocate RX mbuf\n", __func__); 1425 error = ENOBUFS; 1426 goto fail; 1427 } 1428 1429 error = bus_dmamap_load(ring->data_dmat, data->map, 1430 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1431 &paddr, BUS_DMA_NOWAIT); 1432 if (error != 0 && error != EFBIG) { 1433 device_printf(sc->sc_dev, 1434 "%s: can't not map mbuf, error %d\n", __func__, 1435 error); 1436 goto fail; 1437 } 1438 1439 /* Set physical address of RX buffer (256-byte aligned). */ 1440 ring->desc[i] = htole32(paddr >> 8); 1441 } 1442 1443 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1444 BUS_DMASYNC_PREWRITE); 1445 1446 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1447 1448 return 0; 1449 1450 fail: iwn_free_rx_ring(sc, ring); 1451 1452 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1453 1454 return error; 1455 } 1456 1457 static void 1458 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1459 { 1460 int ntries; 1461 1462 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1463 1464 if (iwn_nic_lock(sc) == 0) { 1465 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1466 for (ntries = 0; ntries < 1000; ntries++) { 1467 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1468 IWN_FH_RX_STATUS_IDLE) 1469 break; 1470 DELAY(10); 1471 } 1472 iwn_nic_unlock(sc); 1473 } 1474 ring->cur = 0; 1475 sc->last_rx_valid = 0; 1476 } 1477 1478 static void 1479 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1480 { 1481 int i; 1482 1483 DPRINTF(sc, IWN_DEBUG_TRACE, 
"->Doing %s \n", __func__); 1484 1485 iwn_dma_contig_free(&ring->desc_dma); 1486 iwn_dma_contig_free(&ring->stat_dma); 1487 1488 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1489 struct iwn_rx_data *data = &ring->data[i]; 1490 1491 if (data->m != NULL) { 1492 bus_dmamap_sync(ring->data_dmat, data->map, 1493 BUS_DMASYNC_POSTREAD); 1494 bus_dmamap_unload(ring->data_dmat, data->map); 1495 m_freem(data->m); 1496 data->m = NULL; 1497 } 1498 if (data->map != NULL) 1499 bus_dmamap_destroy(ring->data_dmat, data->map); 1500 } 1501 if (ring->data_dmat != NULL) { 1502 bus_dma_tag_destroy(ring->data_dmat); 1503 ring->data_dmat = NULL; 1504 } 1505 } 1506 1507 static int 1508 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1509 { 1510 bus_addr_t paddr; 1511 bus_size_t size; 1512 int i, error; 1513 1514 ring->qid = qid; 1515 ring->queued = 0; 1516 ring->cur = 0; 1517 1518 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1519 1520 /* Allocate TX descriptors (256-byte aligned). 
*/ 1521 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1522 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1523 size, 256); 1524 if (error != 0) { 1525 device_printf(sc->sc_dev, 1526 "%s: could not allocate TX ring DMA memory, error %d\n", 1527 __func__, error); 1528 goto fail; 1529 } 1530 1531 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1532 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1533 size, 4); 1534 if (error != 0) { 1535 device_printf(sc->sc_dev, 1536 "%s: could not allocate TX cmd DMA memory, error %d\n", 1537 __func__, error); 1538 goto fail; 1539 } 1540 1541 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1542 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1543 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1544 &ring->data_dmat); 1545 if (error != 0) { 1546 device_printf(sc->sc_dev, 1547 "%s: could not create TX buf DMA tag, error %d\n", 1548 __func__, error); 1549 goto fail; 1550 } 1551 1552 paddr = ring->cmd_dma.paddr; 1553 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1554 struct iwn_tx_data *data = &ring->data[i]; 1555 1556 data->cmd_paddr = paddr; 1557 data->scratch_paddr = paddr + 12; 1558 paddr += sizeof (struct iwn_tx_cmd); 1559 1560 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1561 if (error != 0) { 1562 device_printf(sc->sc_dev, 1563 "%s: could not create TX buf DMA map, error %d\n", 1564 __func__, error); 1565 goto fail; 1566 } 1567 } 1568 1569 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1570 1571 return 0; 1572 1573 fail: iwn_free_tx_ring(sc, ring); 1574 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 1575 return error; 1576 } 1577 1578 static void 1579 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1580 { 1581 int i; 1582 1583 DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__); 1584 1585 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1586 struct iwn_tx_data *data = 
&ring->data[i]; 1587 1588 if (data->m != NULL) { 1589 bus_dmamap_sync(ring->data_dmat, data->map, 1590 BUS_DMASYNC_POSTWRITE); 1591 bus_dmamap_unload(ring->data_dmat, data->map); 1592 m_freem(data->m); 1593 data->m = NULL; 1594 } 1595 } 1596 /* Clear TX descriptors. */ 1597 memset(ring->desc, 0, ring->desc_dma.size); 1598 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1599 BUS_DMASYNC_PREWRITE); 1600 sc->qfullmsk &= ~(1 << ring->qid); 1601 ring->queued = 0; 1602 ring->cur = 0; 1603 } 1604 1605 static void 1606 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1607 { 1608 int i; 1609 1610 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1611 1612 iwn_dma_contig_free(&ring->desc_dma); 1613 iwn_dma_contig_free(&ring->cmd_dma); 1614 1615 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1616 struct iwn_tx_data *data = &ring->data[i]; 1617 1618 if (data->m != NULL) { 1619 bus_dmamap_sync(ring->data_dmat, data->map, 1620 BUS_DMASYNC_POSTWRITE); 1621 bus_dmamap_unload(ring->data_dmat, data->map); 1622 m_freem(data->m); 1623 } 1624 if (data->map != NULL) 1625 bus_dmamap_destroy(ring->data_dmat, data->map); 1626 } 1627 if (ring->data_dmat != NULL) { 1628 bus_dma_tag_destroy(ring->data_dmat); 1629 ring->data_dmat = NULL; 1630 } 1631 } 1632 1633 static void 1634 iwn5000_ict_reset(struct iwn_softc *sc) 1635 { 1636 /* Disable interrupts. */ 1637 IWN_WRITE(sc, IWN_INT_MASK, 0); 1638 1639 /* Reset ICT table. */ 1640 memset(sc->ict, 0, IWN_ICT_SIZE); 1641 sc->ict_cur = 0; 1642 1643 /* Set physical address of ICT table (4KB aligned). */ 1644 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); 1645 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 1646 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 1647 1648 /* Enable periodic RX interrupt. */ 1649 sc->int_mask |= IWN_INT_RX_PERIODIC; 1650 /* Switch to ICT interrupt mode in driver. */ 1651 sc->sc_flags |= IWN_FLAG_USE_ICT; 1652 1653 /* Re-enable interrupts. 
*/ 1654 IWN_WRITE(sc, IWN_INT, 0xffffffff); 1655 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 1656 } 1657 1658 static int 1659 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1660 { 1661 struct iwn_ops *ops = &sc->ops; 1662 uint16_t val; 1663 int error; 1664 1665 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1666 1667 /* Check whether adapter has an EEPROM or an OTPROM. */ 1668 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1669 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1670 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1671 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", 1672 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); 1673 1674 /* Adapter has to be powered on for EEPROM access to work. */ 1675 if ((error = iwn_apm_init(sc)) != 0) { 1676 device_printf(sc->sc_dev, 1677 "%s: could not power ON adapter, error %d\n", __func__, 1678 error); 1679 return error; 1680 } 1681 1682 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1683 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); 1684 return EIO; 1685 } 1686 if ((error = iwn_eeprom_lock(sc)) != 0) { 1687 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n", 1688 __func__, error); 1689 return error; 1690 } 1691 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1692 if ((error = iwn_init_otprom(sc)) != 0) { 1693 device_printf(sc->sc_dev, 1694 "%s: could not initialize OTPROM, error %d\n", 1695 __func__, error); 1696 return error; 1697 } 1698 } 1699 1700 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); 1701 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val)); 1702 /* Check if HT support is bonded out. */ 1703 if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) 1704 sc->sc_flags |= IWN_FLAG_HAS_11N; 1705 1706 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1707 sc->rfcfg = le16toh(val); 1708 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); 1709 /* Read Tx/Rx chains from ROM unless it's known to be broken. 
*/ 1710 if (sc->txchainmask == 0) 1711 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); 1712 if (sc->rxchainmask == 0) 1713 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); 1714 1715 /* Read MAC address. */ 1716 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); 1717 1718 /* Read adapter-specific information from EEPROM. */ 1719 ops->read_eeprom(sc); 1720 1721 iwn_apm_stop(sc); /* Power OFF adapter. */ 1722 1723 iwn_eeprom_unlock(sc); 1724 1725 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1726 1727 return 0; 1728 } 1729 1730 static void 1731 iwn4965_read_eeprom(struct iwn_softc *sc) 1732 { 1733 uint32_t addr; 1734 uint16_t val; 1735 int i; 1736 1737 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1738 1739 /* Read regulatory domain (4 ASCII characters). */ 1740 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1741 1742 /* Read the list of authorized channels (20MHz ones only). */ 1743 for (i = 0; i < 7; i++) { 1744 addr = iwn4965_regulatory_bands[i]; 1745 iwn_read_eeprom_channels(sc, i, addr); 1746 } 1747 1748 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1749 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1750 sc->maxpwr2GHz = val & 0xff; 1751 sc->maxpwr5GHz = val >> 8; 1752 /* Check that EEPROM values are within valid range. */ 1753 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1754 sc->maxpwr5GHz = 38; 1755 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1756 sc->maxpwr2GHz = 38; 1757 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", 1758 sc->maxpwr2GHz, sc->maxpwr5GHz); 1759 1760 /* Read samples for each TX power group. */ 1761 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1762 sizeof sc->bands); 1763 1764 /* Read voltage at which samples were taken. 
*/ 1765 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1766 sc->eeprom_voltage = (int16_t)le16toh(val); 1767 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", 1768 sc->eeprom_voltage); 1769 1770 #ifdef IWN_DEBUG 1771 /* Print samples. */ 1772 if (sc->sc_debug & IWN_DEBUG_ANY) { 1773 for (i = 0; i < IWN_NBANDS; i++) 1774 iwn4965_print_power_group(sc, i); 1775 } 1776 #endif 1777 1778 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1779 } 1780 1781 #ifdef IWN_DEBUG 1782 static void 1783 iwn4965_print_power_group(struct iwn_softc *sc, int i) 1784 { 1785 struct iwn4965_eeprom_band *band = &sc->bands[i]; 1786 struct iwn4965_eeprom_chan_samples *chans = band->chans; 1787 int j, c; 1788 1789 printf("===band %d===\n", i); 1790 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 1791 printf("chan1 num=%d\n", chans[0].num); 1792 for (c = 0; c < 2; c++) { 1793 for (j = 0; j < IWN_NSAMPLES; j++) { 1794 printf("chain %d, sample %d: temp=%d gain=%d " 1795 "power=%d pa_det=%d\n", c, j, 1796 chans[0].samples[c][j].temp, 1797 chans[0].samples[c][j].gain, 1798 chans[0].samples[c][j].power, 1799 chans[0].samples[c][j].pa_det); 1800 } 1801 } 1802 printf("chan2 num=%d\n", chans[1].num); 1803 for (c = 0; c < 2; c++) { 1804 for (j = 0; j < IWN_NSAMPLES; j++) { 1805 printf("chain %d, sample %d: temp=%d gain=%d " 1806 "power=%d pa_det=%d\n", c, j, 1807 chans[1].samples[c][j].temp, 1808 chans[1].samples[c][j].gain, 1809 chans[1].samples[c][j].power, 1810 chans[1].samples[c][j].pa_det); 1811 } 1812 } 1813 } 1814 #endif 1815 1816 static void 1817 iwn5000_read_eeprom(struct iwn_softc *sc) 1818 { 1819 struct iwn5000_eeprom_calib_hdr hdr; 1820 int32_t volt; 1821 uint32_t base, addr; 1822 uint16_t val; 1823 int i; 1824 1825 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1826 1827 /* Read regulatory domain (4 ASCII characters). 
*/ 1828 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1829 base = le16toh(val); 1830 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 1831 sc->eeprom_domain, 4); 1832 1833 /* Read the list of authorized channels (20MHz ones only). */ 1834 for (i = 0; i < 7; i++) { 1835 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1836 addr = base + iwn6000_regulatory_bands[i]; 1837 else 1838 addr = base + iwn5000_regulatory_bands[i]; 1839 iwn_read_eeprom_channels(sc, i, addr); 1840 } 1841 1842 /* Read enhanced TX power information for 6000 Series. */ 1843 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1844 iwn_read_eeprom_enhinfo(sc); 1845 1846 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 1847 base = le16toh(val); 1848 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 1849 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 1850 "%s: calib version=%u pa type=%u voltage=%u\n", __func__, 1851 hdr.version, hdr.pa_type, le16toh(hdr.volt)); 1852 sc->calib_ver = hdr.version; 1853 1854 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 1855 /* Compute temperature offset. */ 1856 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1857 sc->eeprom_temp = le16toh(val); 1858 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 1859 volt = le16toh(val); 1860 sc->temp_off = sc->eeprom_temp - (volt / -5); 1861 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", 1862 sc->eeprom_temp, volt, sc->temp_off); 1863 } else { 1864 /* Read crystal calibration. */ 1865 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 1866 &sc->eeprom_crystal, sizeof (uint32_t)); 1867 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n", 1868 le32toh(sc->eeprom_crystal)); 1869 } 1870 1871 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1872 1873 } 1874 1875 /* 1876 * Translate EEPROM flags to net80211. 
1877 */ 1878 static uint32_t 1879 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) 1880 { 1881 uint32_t nflags; 1882 1883 nflags = 0; 1884 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 1885 nflags |= IEEE80211_CHAN_PASSIVE; 1886 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) 1887 nflags |= IEEE80211_CHAN_NOADHOC; 1888 if (channel->flags & IWN_EEPROM_CHAN_RADAR) { 1889 nflags |= IEEE80211_CHAN_DFS; 1890 /* XXX apparently IBSS may still be marked */ 1891 nflags |= IEEE80211_CHAN_NOADHOC; 1892 } 1893 1894 return nflags; 1895 } 1896 1897 static void 1898 iwn_read_eeprom_band(struct iwn_softc *sc, int n) 1899 { 1900 struct ifnet *ifp = sc->sc_ifp; 1901 struct ieee80211com *ic = ifp->if_l2com; 1902 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 1903 const struct iwn_chan_band *band = &iwn_bands[n]; 1904 struct ieee80211_channel *c; 1905 uint8_t chan; 1906 int i, nflags; 1907 1908 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1909 1910 for (i = 0; i < band->nchan; i++) { 1911 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 1912 DPRINTF(sc, IWN_DEBUG_RESET, 1913 "skip chan %d flags 0x%x maxpwr %d\n", 1914 band->chan[i], channels[i].flags, 1915 channels[i].maxpwr); 1916 continue; 1917 } 1918 chan = band->chan[i]; 1919 nflags = iwn_eeprom_channel_flags(&channels[i]); 1920 1921 c = &ic->ic_channels[ic->ic_nchans++]; 1922 c->ic_ieee = chan; 1923 c->ic_maxregpower = channels[i].maxpwr; 1924 c->ic_maxpower = 2*c->ic_maxregpower; 1925 1926 if (n == 0) { /* 2GHz band */ 1927 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G); 1928 /* G =>'s B is supported */ 1929 c->ic_flags = IEEE80211_CHAN_B | nflags; 1930 c = &ic->ic_channels[ic->ic_nchans++]; 1931 c[0] = c[-1]; 1932 c->ic_flags = IEEE80211_CHAN_G | nflags; 1933 } else { /* 5GHz band */ 1934 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A); 1935 c->ic_flags = IEEE80211_CHAN_A | nflags; 1936 } 1937 1938 /* Save maximum allowed TX power for this channel. 
 */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n", chan,
		    channels[i].flags, channels[i].maxpwr);

		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
			/* add HT20, HT40 added separately */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags |= IEEE80211_CHAN_HT20;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Add the valid HT40 channel pairs of EEPROM band `n' to the net80211
 * channel list.  Each entry names a center channel; the HT40U and HT40D
 * variants are built from the already-added 20MHz channels.
 */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c, *cent, *extc;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);

	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
		return;
	}

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		/*
		 * Each entry defines an HT40 channel pair; find the
		 * center channel, then the extension channel above.
		 */
		cent = ieee80211_find_channel_byieee(ic, chan,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (cent == NULL) {	/* XXX shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s: no entry for channel %d\n", __func__, chan);
			continue;
		}
		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (extc == NULL) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: skip chan %d, extension channel not found\n",
			    __func__, chan);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
		    chan, channels[i].flags, channels[i].maxpwr);

		/* HT40U: copy of the center channel. */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = cent[0];
		c->ic_extieee = extc->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
		/* HT40D: copy of the extension channel. */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = extc[0];
		c->ic_extieee = cent->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/* Read EEPROM band `n' and merge its channels into the sorted list. */
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));

	/* Bands 0-4 hold 20MHz channels; bands 5-6 hold HT40 pairs. */
	if (n < 5)
		iwn_read_eeprom_band(sc, n);
	else
		iwn_read_eeprom_ht40(sc, n);
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}

/*
 * Map a net80211 channel back to its EEPROM entry, or NULL if the
 * channel is unknown to the adapter.
 */
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	int band, chan, i, j;

	if (IEEE80211_IS_CHAN_HT40(c)) {
		/* HT40 pairs live in band 5 (2GHz) and band 6 (5GHz). */
		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
		if (IEEE80211_IS_CHAN_HT40D(c))
			chan = c->ic_extieee;
		else
			chan = c->ic_ieee;
		for (i = 0; i < iwn_bands[band].nchan; i++) {
			if (iwn_bands[band].chan[i] == chan)
				return &sc->eeprom_channels[band][i];
		}
	} else {
		for (j = 0; j < 5; j++) {
			for (i = 0; i < iwn_bands[j].nchan; i++) {
				if (iwn_bands[j].chan[i] == c->ic_ieee)
					return &sc->eeprom_channels[j][i];
			}
		}
	}
	return NULL;
}

/*
 * Enforce flags read from EEPROM.
 */
static int
iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
    int nchan, struct ieee80211_channel chans[])
{
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		struct iwn_eeprom_chan *channel;

		channel = iwn_find_eeprom_channel(sc, c);
		if (channel == NULL) {
			if_printf(ic->ic_ifp,
			    "%s: invalid channel %u freq %u/0x%x\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		c->ic_flags |= iwn_eeprom_channel_flags(channel);
	}

	return 0;
}

/*
 * Read the enhanced TX power entries (6000 Series) and apply them to
 * the matching channels.
 */
static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_channel *c;
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i, j;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	for (i = 0; i < nitems(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries.
*/ 2118 2119 maxpwr = 0; 2120 if (sc->txchainmask & IWN_ANT_A) 2121 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 2122 if (sc->txchainmask & IWN_ANT_B) 2123 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 2124 if (sc->txchainmask & IWN_ANT_C) 2125 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 2126 if (sc->ntxchains == 2) 2127 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 2128 else if (sc->ntxchains == 3) 2129 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 2130 2131 for (j = 0; j < ic->ic_nchans; j++) { 2132 c = &ic->ic_channels[j]; 2133 if ((flags & IWN_ENHINFO_5GHZ)) { 2134 if (!IEEE80211_IS_CHAN_A(c)) 2135 continue; 2136 } else if ((flags & IWN_ENHINFO_OFDM)) { 2137 if (!IEEE80211_IS_CHAN_G(c)) 2138 continue; 2139 } else if (!IEEE80211_IS_CHAN_B(c)) 2140 continue; 2141 if ((flags & IWN_ENHINFO_HT40)) { 2142 if (!IEEE80211_IS_CHAN_HT40(c)) 2143 continue; 2144 } else { 2145 if (IEEE80211_IS_CHAN_HT40(c)) 2146 continue; 2147 } 2148 if (enhinfo[i].chan != 0 && 2149 enhinfo[i].chan != c->ic_ieee) 2150 continue; 2151 2152 DPRINTF(sc, IWN_DEBUG_RESET, 2153 "channel %d(%x), maxpwr %d\n", c->ic_ieee, 2154 c->ic_flags, maxpwr / 2); 2155 c->ic_maxregpower = maxpwr / 2; 2156 c->ic_maxpower = maxpwr; 2157 } 2158 } 2159 2160 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2161 2162 } 2163 2164 static struct ieee80211_node * 2165 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2166 { 2167 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 2168 } 2169 2170 static __inline int 2171 rate2plcp(int rate) 2172 { 2173 switch (rate & 0xff) { 2174 case 12: return 0xd; 2175 case 18: return 0xf; 2176 case 24: return 0x5; 2177 case 36: return 0x7; 2178 case 48: return 0x9; 2179 case 72: return 0xb; 2180 case 96: return 0x1; 2181 case 108: return 0x3; 2182 case 2: return 10; 2183 case 4: return 20; 2184 case 11: return 55; 2185 case 22: return 110; 2186 } 2187 return 0; 2188 } 2189 2190 /* 2191 * Calculate the required PLCP value from the given rate, 2192 
* to the given node. 2193 * 2194 * This will take the node configuration (eg 11n, rate table 2195 * setup, etc) into consideration. 2196 */ 2197 static uint32_t 2198 iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni, 2199 uint8_t rate) 2200 { 2201 #define RV(v) ((v) & IEEE80211_RATE_VAL) 2202 struct ieee80211com *ic = ni->ni_ic; 2203 uint8_t txant1, txant2; 2204 uint32_t plcp = 0; 2205 int ridx; 2206 2207 /* Use the first valid TX antenna. */ 2208 txant1 = IWN_LSB(sc->txchainmask); 2209 txant2 = IWN_LSB(sc->txchainmask & ~txant1); 2210 2211 /* 2212 * If it's an MCS rate, let's set the plcp correctly 2213 * and set the relevant flags based on the node config. 2214 */ 2215 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 2216 /* 2217 * Set the initial PLCP value to be between 0->31 for 2218 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!" 2219 * flag. 2220 */ 2221 plcp = RV(rate) | IWN_RFLAG_MCS; 2222 2223 /* 2224 * XXX the following should only occur if both 2225 * the local configuration _and_ the remote node 2226 * advertise these capabilities. Thus this code 2227 * may need fixing! 2228 */ 2229 2230 /* 2231 * Set the channel width and guard interval. 2232 */ 2233 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 2234 plcp |= IWN_RFLAG_HT40; 2235 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) 2236 plcp |= IWN_RFLAG_SGI; 2237 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { 2238 plcp |= IWN_RFLAG_SGI; 2239 } 2240 2241 /* 2242 * If it's a two stream rate, enable TX on both 2243 * antennas. 2244 * 2245 * XXX three stream rates? 2246 */ 2247 if (rate > 0x87) 2248 plcp |= IWN_RFLAG_ANT(txant1 | txant2); 2249 else 2250 plcp |= IWN_RFLAG_ANT(txant1); 2251 } else { 2252 /* 2253 * Set the initial PLCP - fine for both 2254 * OFDM and CCK rates. 
2255 */ 2256 plcp = rate2plcp(rate); 2257 2258 /* Set CCK flag if it's CCK */ 2259 2260 /* XXX It would be nice to have a method 2261 * to map the ridx -> phy table entry 2262 * so we could just query that, rather than 2263 * this hack to check against IWN_RIDX_OFDM6. 2264 */ 2265 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, 2266 rate & IEEE80211_RATE_VAL); 2267 if (ridx < IWN_RIDX_OFDM6 && 2268 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 2269 plcp |= IWN_RFLAG_CCK; 2270 2271 /* Set antenna configuration */ 2272 plcp |= IWN_RFLAG_ANT(txant1); 2273 } 2274 2275 DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n", 2276 __func__, 2277 rate, 2278 plcp); 2279 2280 return (htole32(plcp)); 2281 #undef RV 2282 } 2283 2284 static void 2285 iwn_newassoc(struct ieee80211_node *ni, int isnew) 2286 { 2287 /* Doesn't do anything at the moment */ 2288 } 2289 2290 static int 2291 iwn_media_change(struct ifnet *ifp) 2292 { 2293 int error; 2294 2295 error = ieee80211_media_change(ifp); 2296 /* NB: only the fixed rate can change and that doesn't need a reset */ 2297 return (error == ENETRESET ? 0 : error); 2298 } 2299 2300 static int 2301 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 2302 { 2303 struct iwn_vap *ivp = IWN_VAP(vap); 2304 struct ieee80211com *ic = vap->iv_ic; 2305 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2306 int error = 0; 2307 2308 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2309 2310 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 2311 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); 2312 2313 IEEE80211_UNLOCK(ic); 2314 IWN_LOCK(sc); 2315 callout_stop(&sc->calib_to); 2316 2317 switch (nstate) { 2318 case IEEE80211_S_ASSOC: 2319 if (vap->iv_state != IEEE80211_S_RUN) 2320 break; 2321 /* FALLTHROUGH */ 2322 case IEEE80211_S_AUTH: 2323 if (vap->iv_state == IEEE80211_S_AUTH) 2324 break; 2325 2326 /* 2327 * !AUTH -> AUTH transition requires state reset to handle 2328 * reassociations correctly. 
2329 */ 2330 sc->rxon.associd = 0; 2331 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 2332 sc->calib.state = IWN_CALIB_STATE_INIT; 2333 2334 if ((error = iwn_auth(sc, vap)) != 0) { 2335 device_printf(sc->sc_dev, 2336 "%s: could not move to auth state\n", __func__); 2337 } 2338 break; 2339 2340 case IEEE80211_S_RUN: 2341 /* 2342 * RUN -> RUN transition; Just restart the timers. 2343 */ 2344 if (vap->iv_state == IEEE80211_S_RUN) { 2345 sc->calib_cnt = 0; 2346 break; 2347 } 2348 2349 /* 2350 * !RUN -> RUN requires setting the association id 2351 * which is done with a firmware cmd. We also defer 2352 * starting the timers until that work is done. 2353 */ 2354 if ((error = iwn_run(sc, vap)) != 0) { 2355 device_printf(sc->sc_dev, 2356 "%s: could not move to run state\n", __func__); 2357 } 2358 break; 2359 2360 case IEEE80211_S_INIT: 2361 sc->calib.state = IWN_CALIB_STATE_INIT; 2362 break; 2363 2364 default: 2365 break; 2366 } 2367 IWN_UNLOCK(sc); 2368 IEEE80211_LOCK(ic); 2369 if (error != 0){ 2370 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 2371 return error; 2372 } 2373 2374 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2375 2376 return ivp->iv_newstate(vap, nstate, arg); 2377 } 2378 2379 static void 2380 iwn_calib_timeout(void *arg) 2381 { 2382 struct iwn_softc *sc = arg; 2383 2384 IWN_LOCK_ASSERT(sc); 2385 2386 /* Force automatic TX power calibration every 60 secs. */ 2387 if (++sc->calib_cnt >= 120) { 2388 uint32_t flags = 0; 2389 2390 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2391 "sending request for statistics"); 2392 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2393 sizeof flags, 1); 2394 sc->calib_cnt = 0; 2395 } 2396 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 2397 sc); 2398 } 2399 2400 /* 2401 * Process an RX_PHY firmware notification. This is usually immediately 2402 * followed by an MPDU_RX_DONE notification. 
2403 */ 2404 static void 2405 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2406 struct iwn_rx_data *data) 2407 { 2408 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2409 2410 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2411 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2412 2413 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2414 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2415 sc->last_rx_valid = 1; 2416 } 2417 2418 /* 2419 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2420 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 2421 */ 2422 static void 2423 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2424 struct iwn_rx_data *data) 2425 { 2426 struct iwn_ops *ops = &sc->ops; 2427 struct ifnet *ifp = sc->sc_ifp; 2428 struct ieee80211com *ic = ifp->if_l2com; 2429 struct iwn_rx_ring *ring = &sc->rxq; 2430 struct ieee80211_frame *wh; 2431 struct ieee80211_node *ni; 2432 struct mbuf *m, *m1; 2433 struct iwn_rx_stat *stat; 2434 caddr_t head; 2435 bus_addr_t paddr; 2436 uint32_t flags; 2437 int error, len, rssi, nf; 2438 2439 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2440 2441 if (desc->type == IWN_MPDU_RX_DONE) { 2442 /* Check for prior RX_PHY notification. 
*/ 2443 if (!sc->last_rx_valid) { 2444 DPRINTF(sc, IWN_DEBUG_ANY, 2445 "%s: missing RX_PHY\n", __func__); 2446 return; 2447 } 2448 stat = &sc->last_rx_stat; 2449 } else 2450 stat = (struct iwn_rx_stat *)(desc + 1); 2451 2452 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2453 2454 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2455 device_printf(sc->sc_dev, 2456 "%s: invalid RX statistic header, len %d\n", __func__, 2457 stat->cfg_phy_len); 2458 return; 2459 } 2460 if (desc->type == IWN_MPDU_RX_DONE) { 2461 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2462 head = (caddr_t)(mpdu + 1); 2463 len = le16toh(mpdu->len); 2464 } else { 2465 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2466 len = le16toh(stat->len); 2467 } 2468 2469 flags = le32toh(*(uint32_t *)(head + len)); 2470 2471 /* Discard frames with a bad FCS early. */ 2472 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2473 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", 2474 __func__, flags); 2475 ifp->if_ierrors++; 2476 return; 2477 } 2478 /* Discard frames that are too short. */ 2479 if (len < sizeof (*wh)) { 2480 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2481 __func__, len); 2482 ifp->if_ierrors++; 2483 return; 2484 } 2485 2486 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 2487 if (m1 == NULL) { 2488 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2489 __func__); 2490 ifp->if_ierrors++; 2491 return; 2492 } 2493 bus_dmamap_unload(ring->data_dmat, data->map); 2494 2495 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 2496 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2497 if (error != 0 && error != EFBIG) { 2498 device_printf(sc->sc_dev, 2499 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2500 m_freem(m1); 2501 2502 /* Try to reload the old mbuf. 
*/ 2503 error = bus_dmamap_load(ring->data_dmat, data->map, 2504 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 2505 &paddr, BUS_DMA_NOWAIT); 2506 if (error != 0 && error != EFBIG) { 2507 panic("%s: could not load old RX mbuf", __func__); 2508 } 2509 /* Physical address may have changed. */ 2510 ring->desc[ring->cur] = htole32(paddr >> 8); 2511 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2512 BUS_DMASYNC_PREWRITE); 2513 ifp->if_ierrors++; 2514 return; 2515 } 2516 2517 m = data->m; 2518 data->m = m1; 2519 /* Update RX descriptor. */ 2520 ring->desc[ring->cur] = htole32(paddr >> 8); 2521 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2522 BUS_DMASYNC_PREWRITE); 2523 2524 /* Finalize mbuf. */ 2525 m->m_pkthdr.rcvif = ifp; 2526 m->m_data = head; 2527 m->m_pkthdr.len = m->m_len = len; 2528 2529 /* Grab a reference to the source node. */ 2530 wh = mtod(m, struct ieee80211_frame *); 2531 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2532 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2533 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95; 2534 2535 rssi = ops->get_rssi(sc, stat); 2536 2537 if (ieee80211_radiotap_active(ic)) { 2538 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2539 2540 tap->wr_flags = 0; 2541 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2542 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2543 tap->wr_dbm_antsignal = (int8_t)rssi; 2544 tap->wr_dbm_antnoise = (int8_t)nf; 2545 tap->wr_tsft = stat->tstamp; 2546 switch (stat->rate) { 2547 /* CCK rates. */ 2548 case 10: tap->wr_rate = 2; break; 2549 case 20: tap->wr_rate = 4; break; 2550 case 55: tap->wr_rate = 11; break; 2551 case 110: tap->wr_rate = 22; break; 2552 /* OFDM rates. 
*/ 2553 case 0xd: tap->wr_rate = 12; break; 2554 case 0xf: tap->wr_rate = 18; break; 2555 case 0x5: tap->wr_rate = 24; break; 2556 case 0x7: tap->wr_rate = 36; break; 2557 case 0x9: tap->wr_rate = 48; break; 2558 case 0xb: tap->wr_rate = 72; break; 2559 case 0x1: tap->wr_rate = 96; break; 2560 case 0x3: tap->wr_rate = 108; break; 2561 /* Unknown rate: should not happen. */ 2562 default: tap->wr_rate = 0; 2563 } 2564 } 2565 2566 IWN_UNLOCK(sc); 2567 2568 /* Send the frame to the 802.11 layer. */ 2569 if (ni != NULL) { 2570 if (ni->ni_flags & IEEE80211_NODE_HT) 2571 m->m_flags |= M_AMPDU; 2572 (void)ieee80211_input(ni, m, rssi - nf, nf); 2573 /* Node is no longer needed. */ 2574 ieee80211_free_node(ni); 2575 } else 2576 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 2577 2578 IWN_LOCK(sc); 2579 2580 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2581 2582 } 2583 2584 /* Process an incoming Compressed BlockAck. */ 2585 static void 2586 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2587 struct iwn_rx_data *data) 2588 { 2589 struct iwn_ops *ops = &sc->ops; 2590 struct ifnet *ifp = sc->sc_ifp; 2591 struct iwn_node *wn; 2592 struct ieee80211_node *ni; 2593 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2594 struct iwn_tx_ring *txq; 2595 struct iwn_tx_data *txdata; 2596 struct ieee80211_tx_ampdu *tap; 2597 struct mbuf *m; 2598 uint64_t bitmap; 2599 uint16_t ssn; 2600 uint8_t tid; 2601 int ackfailcnt = 0, i, lastidx, qid, *res, shift; 2602 2603 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2604 2605 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2606 2607 qid = le16toh(ba->qid); 2608 txq = &sc->txq[ba->qid]; 2609 tap = sc->qid2tap[ba->qid]; 2610 tid = tap->txa_tid; 2611 wn = (void *)tap->txa_ni; 2612 2613 res = NULL; 2614 ssn = 0; 2615 if (!IEEE80211_AMPDU_RUNNING(tap)) { 2616 res = tap->txa_private; 2617 ssn = tap->txa_start & 0xfff; 2618 } 2619 2620 for (lastidx = le16toh(ba->ssn) & 
0xff; txq->read != lastidx;) { 2621 txdata = &txq->data[txq->read]; 2622 2623 /* Unmap and free mbuf. */ 2624 bus_dmamap_sync(txq->data_dmat, txdata->map, 2625 BUS_DMASYNC_POSTWRITE); 2626 bus_dmamap_unload(txq->data_dmat, txdata->map); 2627 m = txdata->m, txdata->m = NULL; 2628 ni = txdata->ni, txdata->ni = NULL; 2629 2630 KASSERT(ni != NULL, ("no node")); 2631 KASSERT(m != NULL, ("no mbuf")); 2632 2633 if (m->m_flags & M_TXCB) 2634 ieee80211_process_callback(ni, m, 1); 2635 2636 m_freem(m); 2637 ieee80211_free_node(ni); 2638 2639 txq->queued--; 2640 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; 2641 } 2642 2643 if (txq->queued == 0 && res != NULL) { 2644 iwn_nic_lock(sc); 2645 ops->ampdu_tx_stop(sc, qid, tid, ssn); 2646 iwn_nic_unlock(sc); 2647 sc->qid2tap[qid] = NULL; 2648 free(res, M_DEVBUF); 2649 return; 2650 } 2651 2652 if (wn->agg[tid].bitmap == 0) 2653 return; 2654 2655 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 2656 if (shift < 0) 2657 shift += 0x100; 2658 2659 if (wn->agg[tid].nframes > (64 - shift)) 2660 return; 2661 2662 ni = tap->txa_ni; 2663 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; 2664 for (i = 0; bitmap; i++) { 2665 if ((bitmap & 1) == 0) { 2666 ifp->if_oerrors++; 2667 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 2668 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2669 } else { 2670 ifp->if_opackets++; 2671 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 2672 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2673 } 2674 bitmap >>= 1; 2675 } 2676 2677 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2678 2679 } 2680 2681 /* 2682 * Process a CALIBRATION_RESULT notification sent by the initialization 2683 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 
 */
static void
iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
	int len, idx = -1;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Runtime firmware should not send such a notification. */
	if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
		    __func__);
		return;
	}
	/* Payload length, minus the 4-byte status word. */
	len = (le32toh(desc->len) & 0x3fff) - 4;
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Map the calibration code to a calibcmd slot; some results
	 * only apply to certain hardware revisions. */
	switch (calib->code) {
	case IWN5000_PHY_CALIB_DC:
		if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 &&
		    (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
		     sc->hw_type >= IWN_HW_REV_TYPE_6000) &&
		    sc->hw_type != IWN_HW_REV_TYPE_6050)
			idx = 0;
		break;
	case IWN5000_PHY_CALIB_LO:
		idx = 1;
		break;
	case IWN5000_PHY_CALIB_TX_IQ:
		idx = 2;
		break;
	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
		    sc->hw_type != IWN_HW_REV_TYPE_5150)
			idx = 3;
		break;
	case IWN5000_PHY_CALIB_BASE_BAND:
		idx = 4;
		break;
	}
	if (idx == -1)	/* Ignore other results. */
		return;

	/* Save calibration result. */
	if (sc->calibcmd[idx].buf != NULL)
		free(sc->calibcmd[idx].buf, M_DEVBUF);
	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
	if (sc->calibcmd[idx].buf == NULL) {
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "not enough memory for calibration result %d\n",
		    calib->code);
		return;
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "saving calibration result code=%d len=%d\n", calib->code, len);
	sc->calibcmd[idx].len = len;
	memcpy(sc->calibcmd[idx].buf, calib, len);
}

/*
 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
 * The latter is sent by the firmware after each received beacon.
 */
static void
iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
	int temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Ignore statistics received during a scan. */
	if (vap->iv_state != IEEE80211_S_RUN ||
	    (ic->ic_flags & IEEE80211_F_SCAN)){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
		    __func__);
		return;
	}

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
	    __func__, desc->type);
	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */

	/* Test if temperature has changed. */
	if (stats->general.temp != sc->rawtemp) {
		/* Convert "raw" temperature to degC. */
		sc->rawtemp = stats->general.temp;
		temp = ops->get_temperature(sc);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
		    __func__, temp);

		/* Update TX power if need be (4965AGN only). */
		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
			iwn4965_power_calibration(sc, temp);
	}

	if (desc->type != IWN_BEACON_STATISTICS)
		return;	/* Reply to a statistics request. */

	sc->noise = iwn_get_noise(&stats->rx.general);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);

	/* Test that RSSI and noise are present in stats report. */
	if (le32toh(stats->rx.general.flags) != 1) {
		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
		    "received statistics without RSSI");
		return;
	}

	/* Feed the statistics into the current calibration phase. */
	if (calib->state == IWN_CALIB_STATE_ASSOC)
		iwn_collect_noise(sc, &stats->rx.general);
	else if (calib->state == IWN_CALIB_STATE_RUN)
		iwn_tune_sensitivity(sc, &stats->rx);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/*
 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
 * and 5000 adapters have different incompatible TX status formats.
 */
static void
iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Aggregation queues use the A-MPDU completion path. */
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    &stat->status);
	} else {
		/* 4965: status is a 32-bit LE word; low byte is the code. */
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le32toh(stat->status) & 0xff);
	}
}

static void
iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

#ifdef notyet
	/* Reset TX scheduler slot. */
	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
#endif

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Aggregation queues use the A-MPDU completion path. */
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    &stat->status);
	} else {
		/* 5000: status is a 16-bit LE word; low byte is the code. */
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le16toh(stat->status) & 0xff);
	}
}

/*
 * Adapter-independent backend for TX_DONE firmware notifications.
 */
static void
iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
    uint8_t status)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
	struct iwn_tx_data *data = &ring->data[desc->idx];
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	KASSERT(data->ni != NULL, ("no node"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, data->map);
	m = data->m, data->m = NULL;
	ni = data->ni, data->ni = NULL;
	vap = ni->ni_vap;

	if (m->m_flags & M_TXCB) {
		/*
		 * Channels marked for "radar" require traffic to be received
		 * to unlock before we can transmit.  Until traffic is seen
		 * any attempt to transmit is returned immediately with status
		 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
		 * happen on first authenticate after scanning.  To workaround
		 * this we ignore a failure of this sort in AUTH state so the
		 * 802.11 layer will fall back to using a timeout to wait for
		 * the AUTH reply.  This allows the firmware time to see
		 * traffic so a subsequent retry of AUTH succeeds.  It's
		 * unclear why the firmware does not maintain state for
		 * channels recently visited as this would allow immediate
		 * use of the channel after a scan (where we see traffic).
		 */
		if (status == IWN_TX_FAIL_TX_LOCKED &&
		    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
			ieee80211_process_callback(ni, m, 0);
		else
			ieee80211_process_callback(ni, m,
			    (status & IWN_TX_FAIL) != 0);
	}

	/*
	 * Update rate control statistics for the node.
	 */
	if (status & IWN_TX_FAIL) {
		ifp->if_oerrors++;
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
	} else {
		ifp->if_opackets++;
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
	}
	m_freem(m);
	ieee80211_free_node(ni);

	/* Restart output if the queue drained below the low watermark. */
	sc->sc_tx_timer = 0;
	if (--ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			iwn_start_locked(ifp);
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 */
static void
iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
{
	/* Queue 4 is dedicated to firmware commands. */
	struct iwn_tx_ring *ring = &sc->txq[4];
	struct iwn_tx_data *data;

	if ((desc->qid & 0xf) != 4)
		return;	/* Not a command ack. */

	data = &ring->data[desc->idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	wakeup(&ring->desc[desc->idx]);
}

/*
 * A-MPDU TX completion: build the success/failure bitmap from the
 * per-frame aggregation status words, reclaim frames up to the
 * firmware-reported sequence number, and tear down the session if it
 * was stopping and the queue drained.
 */
static void
iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
    void *stat)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_tx_ring *ring = &sc->txq[qid];
	struct iwn_tx_data *data;
	struct mbuf *m;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;
	uint64_t bitmap;
	uint32_t *status = stat;
	uint16_t *aggstatus = stat;
	uint16_t ssn;
	uint8_t tid;
	int bit, i, lastidx, *res, seqno, shift, start;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

#ifdef NOT_YET
	if (nframes == 1) {
		if ((*status & 0xff) != 1 && (*status & 0xff) != 2)
			printf("ieee80211_send_bar()\n");
	}
#endif

	/*
	 * Build a 64-bit bitmap of successfully transmitted frames,
	 * keyed by ring index.  aggstatus holds (status, idx) 16-bit
	 * pairs; a status with bits 0xc set means the frame failed.
	 * The window may wrap around the 256-entry ring, hence the
	 * shift/realign gymnastics below.
	 */
	bitmap = 0;
	start = idx;
	for (i = 0; i < nframes; i++) {
		if (le16toh(aggstatus[i * 2]) & 0xc)
			continue;

		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
		bit = idx - start;
		shift = 0;
		if (bit >= 64) {
			shift = 0x100 - idx + start;
			bit = 0;
			start = idx;
		} else if (bit <= -64)
			bit = 0x100 - start + idx;
		else if (bit < 0) {
			shift = start - idx;
			start = idx;
			bit = 0;
		}
		bitmap = bitmap << shift;
		bitmap |= 1ULL << bit;
	}
	/* Stash the window for matching against a later compressed BA. */
	tap = sc->qid2tap[qid];
	tid = tap->txa_tid;
	wn = (void *)tap->txa_ni;
	wn->agg[tid].bitmap = bitmap;
	wn->agg[tid].startidx = start;
	wn->agg[tid].nframes = nframes;

	/* If the session is winding down, remember how to stop it. */
	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	/* Reclaim everything before the firmware-reported sequence. */
	seqno = le32toh(*(status + nframes)) & 0xfff;
	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
		data = &ring->data[ring->read];

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m = data->m, data->m = NULL;
		ni = data->ni, data->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));

		if (m->m_flags & M_TXCB)
			ieee80211_process_callback(ni, m, 1);

		m_freem(m);
		ieee80211_free_node(ni);

		ring->queued--;
		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
	}

	if (ring->queued == 0 && res != NULL) {
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		free(res, M_DEVBUF);
		return;
	}

	/* Restart output if the queue drained below the low watermark. */
	sc->sc_tx_timer = 0;
	if (ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			iwn_start_locked(ifp);
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process an INT_FH_RX or INT_SW_RX interrupt.
 */
static void
iwn_notif_intr(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * Walk the RX ring from our cursor up to the firmware's write
	 * pointer (closed_count), dispatching each notification by type.
	 */
	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwn_rx_desc *desc;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		desc = mtod(data->m, struct iwn_rx_desc *);

		DPRINTF(sc, IWN_DEBUG_RECV,
		    "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
		    __func__, desc->qid & 0xf, desc->idx, desc->flags,
		    desc->type, iwn_intr_str(desc->type),
		    le16toh(desc->len));

		if (!(desc->qid & 0x80))	/* Reply to a command. */
			iwn_cmd_done(sc, desc);

		switch (desc->type) {
		case IWN_RX_PHY:
			iwn_rx_phy(sc, desc, data);
			break;

		case IWN_RX_DONE:		/* 4965AGN only. */
		case IWN_MPDU_RX_DONE:
			/* An 802.11 frame has been received. */
			iwn_rx_done(sc, desc, data);
			break;

		case IWN_RX_COMPRESSED_BA:
			/* A Compressed BlockAck has been received. */
			iwn_rx_compressed_ba(sc, desc, data);
			break;

		case IWN_TX_DONE:
			/* An 802.11 frame has been transmitted. */
			ops->tx_done(sc, desc, data);
			break;

		case IWN_RX_STATISTICS:
		case IWN_BEACON_STATISTICS:
			iwn_rx_statistics(sc, desc, data);
			break;

		case IWN_BEACON_MISSED:
		{
			struct iwn_beacon_missed *miss =
			    (struct iwn_beacon_missed *)(desc + 1);
			int misses;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			misses = le32toh(miss->consecutive);

			DPRINTF(sc, IWN_DEBUG_STATE,
			    "%s: beacons missed %d/%d\n", __func__,
			    misses, le32toh(miss->total));
			/*
			 * If more than 5 consecutive beacons are missed,
			 * reinitialize the sensitivity state machine.
			 */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (misses > 5)
					(void)iwn_init_sensitivity(sc);
				if (misses >= vap->iv_bmissthreshold) {
					/* Drop the lock: net80211 upcall. */
					IWN_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWN_LOCK(sc);
				}
			}
			break;
		}
		case IWN_UC_READY:
		{
			struct iwn_ucode_info *uc =
			    (struct iwn_ucode_info *)(desc + 1);

			/* The microcontroller is ready. */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "microcode alive notification version=%d.%d "
			    "subtype=%x alive=%x\n", uc->major, uc->minor,
			    uc->subtype, le32toh(uc->valid));

			if (le32toh(uc->valid) != 1) {
				device_printf(sc->sc_dev,
				    "microcontroller initialization failed");
				break;
			}
			if (uc->subtype == IWN_UCODE_INIT) {
				/* Save microcontroller report. */
				memcpy(&sc->ucode_info, uc, sizeof (*uc));
			}
			/* Save the address of the error log in SRAM. */
			sc->errptr = le32toh(uc->errptr);
			break;
		}
		case IWN_STATE_CHANGED:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			/*
			 * State change allows hardware switch change to be
			 * noted.  However, we handle this in iwn_intr as we
			 * get both the enable/disable intr.
			 */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
			    le32toh(*status));
			break;
		}
		case IWN_START_SCAN:
		{
			struct iwn_start_scan *scan =
			    (struct iwn_start_scan *)(desc + 1);

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: scanning channel %d status %x\n",
			    __func__, scan->chan, le32toh(scan->status));
			break;
		}
		case IWN_STOP_SCAN:
		{
			struct iwn_stop_scan *scan =
			    (struct iwn_stop_scan *)(desc + 1);

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, IWN_DEBUG_STATE,
			    "scan finished nchan=%d status=%d chan=%d\n",
			    scan->nchan, scan->status, scan->chan);

			/* Drop the lock: net80211 upcall. */
			IWN_UNLOCK(sc);
			ieee80211_scan_next(vap);
			IWN_LOCK(sc);
			break;
		}
		case IWN5000_CALIBRATION_RESULT:
			iwn5000_rx_calib_results(sc, desc, data);
			break;

		case IWN5000_CALIBRATION_DONE:
			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
			wakeup(sc);	/* Unblock the thread waiting in init. */
			break;
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
	}

	/* Tell the firmware what we have processed. */
	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
}

/*
 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
 * from power-down sleep mode.
 */
static void
iwn_wakeup_intr(struct iwn_softc *sc)
{
	int qid;

	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
	    __func__);

	/* Wakeup RX and TX rings.
 */
	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *ring = &sc->txq[qid];
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
	}
}

/*
 * Handle an RF-kill switch toggle: read the current switch state from the
 * GP_CNTRL register and schedule the radio on/off task accordingly.
 */
static void
iwn_rftoggle_intr(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);

	IWN_LOCK_ASSERT(sc);

	/* RFKILL bit set means the radio is enabled. */
	device_printf(sc->sc_dev, "RF switch: radio %s\n",
	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
	if (tmp & IWN_GP_CNTRL_RFKILL)
		ieee80211_runtask(ic, &sc->sc_radioon_task);
	else
		ieee80211_runtask(ic, &sc->sc_radiooff_task);
}

/*
 * Dump the error log of the firmware when a firmware panic occurs.  Although
 * we can't debug the firmware because it is neither open source nor free, it
 * can help us to identify certain classes of problems.
 */
static void
iwn_fatal_intr(struct iwn_softc *sc)
{
	struct iwn_fw_dump dump;
	int i;

	IWN_LOCK_ASSERT(sc);

	/* Force a complete recalibration on next init. */
	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;

	/* Check that the error log address is valid. */
	if (sc->errptr < IWN_FW_DATA_BASE ||
	    sc->errptr + sizeof (dump) >
	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
		printf("%s: bad firmware error log address 0x%08x\n", __func__,
		    sc->errptr);
		return;
	}
	if (iwn_nic_lock(sc) != 0) {
		printf("%s: could not read firmware error log\n", __func__);
		return;
	}
	/* Read firmware error log from SRAM. */
	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
	    sizeof (dump) / sizeof (uint32_t));
	iwn_nic_unlock(sc);

	if (dump.valid == 0) {
		printf("%s: firmware error log is empty\n", __func__);
		return;
	}
	printf("firmware error log:\n");
	printf(" error type = \"%s\" (0x%08X)\n",
	    (dump.id < nitems(iwn_fw_errmsg)) ?
	    iwn_fw_errmsg[dump.id] : "UNKNOWN",
	    dump.id);
	printf(" program counter = 0x%08X\n", dump.pc);
	printf(" source line = 0x%08X\n", dump.src_line);
	printf(" error data = 0x%08X%08X\n",
	    dump.error_data[0], dump.error_data[1]);
	printf(" branch link = 0x%08X%08X\n",
	    dump.branch_link[0], dump.branch_link[1]);
	printf(" interrupt link = 0x%08X%08X\n",
	    dump.interrupt_link[0], dump.interrupt_link[1]);
	printf(" time = %u\n", dump.time[0]);

	/* Dump driver status (TX and RX rings) while we're here. */
	printf("driver status:\n");
	for (i = 0; i < sc->ntxqs; i++) {
		struct iwn_tx_ring *ring = &sc->txq[i];
		printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
		    i, ring->qid, ring->cur, ring->queued);
	}
	printf(" rx ring: cur=%d\n", sc->rxq.cur);
}

/*
 * Main interrupt handler: collect the interrupt cause bits (from the ICT
 * table when enabled, otherwise from the INT/FH_INT registers) and dispatch
 * to the appropriate sub-handlers.
 */
static void
iwn_intr(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t r1, r2, tmp;

	IWN_LOCK(sc);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Read interrupts from ICT (fast) or from registers (slow). */
	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
		tmp = 0;
		while (sc->ict[sc->ict_cur] != 0) {
			tmp |= sc->ict[sc->ict_cur];
			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
		}
		tmp = le32toh(tmp);
		if (tmp == 0xffffffff)	/* Shouldn't happen. */
			tmp = 0;
		else if (tmp & 0xc0000)	/* Workaround a HW bug.
*/ 3387 tmp |= 0x8000; 3388 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3389 r2 = 0; /* Unused. */ 3390 } else { 3391 r1 = IWN_READ(sc, IWN_INT); 3392 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3393 return; /* Hardware gone! */ 3394 r2 = IWN_READ(sc, IWN_FH_INT); 3395 } 3396 3397 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" 3398 , r1, r2); 3399 3400 if (r1 == 0 && r2 == 0) 3401 goto done; /* Interrupt not for us. */ 3402 3403 /* Acknowledge interrupts. */ 3404 IWN_WRITE(sc, IWN_INT, r1); 3405 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3406 IWN_WRITE(sc, IWN_FH_INT, r2); 3407 3408 if (r1 & IWN_INT_RF_TOGGLED) { 3409 iwn_rftoggle_intr(sc); 3410 goto done; 3411 } 3412 if (r1 & IWN_INT_CT_REACHED) { 3413 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 3414 __func__); 3415 } 3416 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3417 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 3418 __func__); 3419 /* Dump firmware error log and stop. */ 3420 iwn_fatal_intr(sc); 3421 ifp->if_flags &= ~IFF_UP; 3422 iwn_stop_locked(sc); 3423 goto done; 3424 } 3425 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3426 (r2 & IWN_FH_INT_RX)) { 3427 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3428 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3429 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3430 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3431 IWN_INT_PERIODIC_DIS); 3432 iwn_notif_intr(sc); 3433 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 3434 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3435 IWN_INT_PERIODIC_ENA); 3436 } 3437 } else 3438 iwn_notif_intr(sc); 3439 } 3440 3441 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 3442 if (sc->sc_flags & IWN_FLAG_USE_ICT) 3443 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 3444 wakeup(sc); /* FH DMA transfer completed. */ 3445 } 3446 3447 if (r1 & IWN_INT_ALIVE) 3448 wakeup(sc); /* Firmware is alive. */ 3449 3450 if (r1 & IWN_INT_WAKEUP) 3451 iwn_wakeup_intr(sc); 3452 3453 done: 3454 /* Re-enable interrupts. 
*/ 3455 if (ifp->if_flags & IFF_UP) 3456 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 3457 3458 IWN_UNLOCK(sc); 3459 } 3460 3461 /* 3462 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 3463 * 5000 adapters use a slightly different format). 3464 */ 3465 static void 3466 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3467 uint16_t len) 3468 { 3469 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3470 3471 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3472 3473 *w = htole16(len + 8); 3474 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3475 BUS_DMASYNC_PREWRITE); 3476 if (idx < IWN_SCHED_WINSZ) { 3477 *(w + IWN_TX_RING_COUNT) = *w; 3478 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3479 BUS_DMASYNC_PREWRITE); 3480 } 3481 } 3482 3483 static void 3484 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3485 uint16_t len) 3486 { 3487 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3488 3489 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3490 3491 *w = htole16(id << 12 | (len + 8)); 3492 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3493 BUS_DMASYNC_PREWRITE); 3494 if (idx < IWN_SCHED_WINSZ) { 3495 *(w + IWN_TX_RING_COUNT) = *w; 3496 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3497 BUS_DMASYNC_PREWRITE); 3498 } 3499 } 3500 3501 #ifdef notyet 3502 static void 3503 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3504 { 3505 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3506 3507 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3508 3509 *w = (*w & htole16(0xf000)) | htole16(1); 3510 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3511 BUS_DMASYNC_PREWRITE); 3512 if (idx < IWN_SCHED_WINSZ) { 3513 *(w + IWN_TX_RING_COUNT) = *w; 3514 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3515 BUS_DMASYNC_PREWRITE); 3516 } 3517 } 3518 #endif 3519 3520 static int 3521 iwn_tx_data(struct iwn_softc *sc, 
    struct mbuf *m, struct ieee80211_node *ni)
{
	struct iwn_ops *ops = &sc->ops;
	const struct ieee80211_txparam *tp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_node *wn = (void *)ni;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	uint32_t flags;
	uint16_t qos;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint8_t tid, ridx, txant, type;
	int ac, i, totlen, error, pad, nsegs = 0, rate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Select EDCA Access Category and TX ring for this frame. */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}
	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap)) {
			/* Session went away; drop the frame. */
			m_freem(m);
			return EINVAL;
		}

		/*
		 * Aggregation frames go to the queue recorded when the
		 * session was set up; assign the sequence number here.
		 */
		ac = *(int *)tap->txa_private;
		*(uint16_t *)wh->i_seq =
		    htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
		ni->ni_txseqs[tid]++;
	}
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate index. */
	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
	if (type == IEEE80211_FC0_TYPE_MGT)
		rate = tp->mgmtrate;
	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
		rate = tp->mcastrate;
	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rate = tp->ucastrate;
	else {
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		rate = ni->ni_txrate;
	}
	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
	    rate & IEEE80211_RATE_VAL);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		/* Retrieve key for TX. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		ieee80211_radiotap_tx(vap, m);
	}

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* Unicast frame, check if an ACK is expected. */
		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
		    IEEE80211_QOS_ACKPOLICY_NOACK)
			flags |= IWN_TX_NEED_ACK;
	}
	if ((wh->i_fc[0] &
	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
		flags |= IWN_TX_IMM_BA;	/* Cannot happen yet. */

	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */

	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* NB: Group frames are sent using CCK in 802.11b/g. */
		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
			flags |= IWN_TX_NEED_RTS;
		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
		    ridx >= IWN_RIDX_OFDM6) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				flags |= IWN_TX_NEED_CTS;
			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
				flags |= IWN_TX_NEED_RTS;
		}
		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
				/* 5000 autoselects RTS/CTS or CTS-to-self. */
				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
				flags |= IWN_TX_NEED_PROTECTION;
			} else
				flags |= IWN_TX_FULL_TXOP;
		}
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->id = sc->broadcast_id;
	else
		tx->id = wn->id;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->len = htole16(totlen);
	tx->tid = tid;
	tx->rts_ntries = 60;
	tx->data_ntries = 15;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
	if (tx->id == sc->broadcast_id) {
		/* Group or management frame. */
		tx->linkq = 0;
		/* XXX Alternate between antenna A and B? */
		txant = IWN_LSB(sc->txchainmask);
		tx->rate |= htole32(IWN_RFLAG_ANT(txant));
	} else {
		tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
		flags |= IWN_TX_LINKQ;	/* enable MRR */
	}
	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold.
 */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Transmit an 802.11 frame using explicit parameters supplied by the
 * caller (raw-xmit / bpf injection path) instead of deriving them from
 * node and vap state as iwn_tx_data() does.
 */
static int
iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct mbuf *m1;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint32_t flags;
	u_int hdrlen;
	int ac, totlen, error, pad, nsegs = 0, i, rate;
	uint8_t ridx, type, txant;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Caller chooses the access category / TX queue. */
	ac = params->ibp_pri & 3;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate index. */
	rate = params->ibp_rate0;
	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
	    rate & IEEE80211_RATE_VAL);
	if (ridx == (uint8_t)-1) {
		/* XXX fall back to mcast/mgmt rate? */
		m_freem(m);
		return EINVAL;
	}

	totlen = m->m_pkthdr.len;

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
		flags |= IWN_TX_NEED_ACK;
	if (params->ibp_flags & IEEE80211_BPF_RTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_RTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
	}
	if (params->ibp_flags & IEEE80211_BPF_CTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_CTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
	}
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4.
 */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * net80211 raw-transmit entry point: dispatch to the normal or the
 * parameterized TX path.  NB: `m' is reclaimed by the TX routines on
 * failure; we only free the node reference here.
 */
static int
iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}

	IWN_LOCK(sc);
	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		error = iwn_tx_data(sc, m, ni);
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		error = iwn_tx_data_raw(sc, m, ni, params);
	}
	if (error != 0) {
		/* NB: m is reclaimed on tx failure */
		ieee80211_free_node(ni);
		ifp->if_oerrors++;
	}
	sc->sc_tx_timer = 5;

	IWN_UNLOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/* ifnet start entry point: take the driver lock and drain the send queue. */
static void
iwn_start(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;

	IWN_LOCK(sc);
	iwn_start_locked(ifp);
	IWN_UNLOCK(sc);
}

/*
 * Dequeue frames from the interface send queue and hand them to
 * iwn_tx_data() until the queue is empty or a TX ring fills up.
 * Caller must hold the driver lock.
 */
static void
iwn_start_locked(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *m;

	IWN_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
		return;

	for (;;) {
		if (sc->qfullmsk != 0) {
			/* Some TX ring is full: pause output. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/* NB: net80211 stashes the node reference in rcvif. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwn_tx_data(sc, m, ni) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}
		sc->sc_tx_timer = 5;
	}
}

/*
 * Per-second watchdog callout: if a transmission has been pending for too
 * long, declare a device timeout and schedule a reinit.
 */
static void
iwn_watchdog(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	IWN_LOCK_ASSERT(sc);

	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			ieee80211_runtask(ic, &sc->sc_reinit_task);
			return;	/* Reinit will restart the watchdog. */
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
}

/* ifnet ioctl entry point. */
static int
iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0, startall = 0, stop = 0;

	switch (cmd) {
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		IWN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				iwn_init_locked(sc);
				/* RFKILL bit set = radio enabled. */
				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
					startall = 1;
				else
					stop = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				iwn_stop_locked(sc);
		}
		IWN_UNLOCK(sc);
		/* Start/stop outside the driver lock (net80211 upcalls). */
		if (startall)
			ieee80211_start_all(ic);
		else if (vap != NULL && stop)
			ieee80211_stop(vap);
		break;
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Send a command to the firmware.
4162 */ 4163 static int 4164 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 4165 { 4166 struct iwn_tx_ring *ring = &sc->txq[4]; 4167 struct iwn_tx_desc *desc; 4168 struct iwn_tx_data *data; 4169 struct iwn_tx_cmd *cmd; 4170 struct mbuf *m; 4171 bus_addr_t paddr; 4172 int totlen, error; 4173 4174 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4175 4176 if (async == 0) 4177 IWN_LOCK_ASSERT(sc); 4178 4179 desc = &ring->desc[ring->cur]; 4180 data = &ring->data[ring->cur]; 4181 totlen = 4 + size; 4182 4183 if (size > sizeof cmd->data) { 4184 /* Command is too large to fit in a descriptor. */ 4185 if (totlen > MCLBYTES) 4186 return EINVAL; 4187 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 4188 if (m == NULL) 4189 return ENOMEM; 4190 cmd = mtod(m, struct iwn_tx_cmd *); 4191 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 4192 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 4193 if (error != 0) { 4194 m_freem(m); 4195 return error; 4196 } 4197 data->m = m; 4198 } else { 4199 cmd = &ring->cmd[ring->cur]; 4200 paddr = data->cmd_paddr; 4201 } 4202 4203 cmd->code = code; 4204 cmd->flags = 0; 4205 cmd->qid = ring->qid; 4206 cmd->idx = ring->cur; 4207 memcpy(cmd->data, buf, size); 4208 4209 desc->nsegs = 1; 4210 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 4211 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 4212 4213 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 4214 __func__, iwn_intr_str(cmd->code), cmd->code, 4215 cmd->flags, cmd->qid, cmd->idx); 4216 4217 if (size > sizeof cmd->data) { 4218 bus_dmamap_sync(ring->data_dmat, data->map, 4219 BUS_DMASYNC_PREWRITE); 4220 } else { 4221 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4222 BUS_DMASYNC_PREWRITE); 4223 } 4224 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4225 BUS_DMASYNC_PREWRITE); 4226 4227 /* Kick command ring. 
*/ 4228 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4229 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4230 4231 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4232 4233 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 4234 } 4235 4236 static int 4237 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 4238 { 4239 struct iwn4965_node_info hnode; 4240 caddr_t src, dst; 4241 4242 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4243 4244 /* 4245 * We use the node structure for 5000 Series internally (it is 4246 * a superset of the one for 4965AGN). We thus copy the common 4247 * fields before sending the command. 4248 */ 4249 src = (caddr_t)node; 4250 dst = (caddr_t)&hnode; 4251 memcpy(dst, src, 48); 4252 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 4253 memcpy(dst + 48, src + 72, 20); 4254 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 4255 } 4256 4257 static int 4258 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 4259 { 4260 4261 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4262 4263 /* Direct mapping. */ 4264 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 4265 } 4266 4267 static int 4268 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 4269 { 4270 #define RV(v) ((v) & IEEE80211_RATE_VAL) 4271 struct iwn_node *wn = (void *)ni; 4272 struct ieee80211_rateset *rs = &ni->ni_rates; 4273 struct iwn_cmd_link_quality linkq; 4274 uint8_t txant; 4275 int i, rate, txrate; 4276 4277 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4278 4279 /* Use the first valid TX antenna. 
*/ 4280 txant = IWN_LSB(sc->txchainmask); 4281 4282 memset(&linkq, 0, sizeof linkq); 4283 linkq.id = wn->id; 4284 linkq.antmsk_1stream = txant; 4285 linkq.antmsk_2stream = IWN_ANT_AB; 4286 linkq.ampdu_max = 64; 4287 linkq.ampdu_threshold = 3; 4288 linkq.ampdu_limit = htole16(4000); /* 4ms */ 4289 4290 /* Start at highest available bit-rate. */ 4291 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) 4292 txrate = ni->ni_htrates.rs_nrates - 1; 4293 else 4294 txrate = rs->rs_nrates - 1; 4295 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { 4296 uint32_t plcp; 4297 4298 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) 4299 rate = IEEE80211_RATE_MCS | txrate; 4300 else 4301 rate = RV(rs->rs_rates[txrate]); 4302 4303 /* Do rate -> PLCP config mapping */ 4304 plcp = iwn_rate_to_plcp(sc, ni, rate); 4305 linkq.retry[i] = plcp; 4306 4307 /* Special case for dual-stream rates? */ 4308 if ((le32toh(plcp) & IWN_RFLAG_MCS) && 4309 RV(le32toh(plcp)) > 7) 4310 linkq.mimo = i + 1; 4311 4312 /* Next retry at immediate lower bit-rate. */ 4313 if (txrate > 0) 4314 txrate--; 4315 } 4316 4317 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4318 4319 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 4320 #undef RV 4321 } 4322 4323 /* 4324 * Broadcast node is used to send group-addressed and management frames. 
4325 */ 4326 static int 4327 iwn_add_broadcast_node(struct iwn_softc *sc, int async) 4328 { 4329 struct iwn_ops *ops = &sc->ops; 4330 struct ifnet *ifp = sc->sc_ifp; 4331 struct ieee80211com *ic = ifp->if_l2com; 4332 struct iwn_node_info node; 4333 struct iwn_cmd_link_quality linkq; 4334 uint8_t txant; 4335 int i, error; 4336 4337 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4338 4339 memset(&node, 0, sizeof node); 4340 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 4341 node.id = sc->broadcast_id; 4342 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); 4343 if ((error = ops->add_node(sc, &node, async)) != 0) 4344 return error; 4345 4346 /* Use the first valid TX antenna. */ 4347 txant = IWN_LSB(sc->txchainmask); 4348 4349 memset(&linkq, 0, sizeof linkq); 4350 linkq.id = sc->broadcast_id; 4351 linkq.antmsk_1stream = txant; 4352 linkq.antmsk_2stream = IWN_ANT_AB; 4353 linkq.ampdu_max = 64; 4354 linkq.ampdu_threshold = 3; 4355 linkq.ampdu_limit = htole16(4000); /* 4ms */ 4356 4357 /* Use lowest mandatory bit-rate. */ 4358 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) 4359 linkq.retry[0] = htole32(0xd); 4360 else 4361 linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK); 4362 linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant)); 4363 /* Use same bit-rate for all TX retries. 
*/ 4364 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 4365 linkq.retry[i] = linkq.retry[0]; 4366 } 4367 4368 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4369 4370 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 4371 } 4372 4373 static int 4374 iwn_updateedca(struct ieee80211com *ic) 4375 { 4376 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 4377 struct iwn_softc *sc = ic->ic_ifp->if_softc; 4378 struct iwn_edca_params cmd; 4379 int aci; 4380 4381 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4382 4383 memset(&cmd, 0, sizeof cmd); 4384 cmd.flags = htole32(IWN_EDCA_UPDATE); 4385 for (aci = 0; aci < WME_NUM_AC; aci++) { 4386 const struct wmeParams *ac = 4387 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 4388 cmd.ac[aci].aifsn = ac->wmep_aifsn; 4389 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin)); 4390 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax)); 4391 cmd.ac[aci].txoplimit = 4392 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 4393 } 4394 IEEE80211_UNLOCK(ic); 4395 IWN_LOCK(sc); 4396 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 4397 IWN_UNLOCK(sc); 4398 IEEE80211_LOCK(ic); 4399 4400 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4401 4402 return 0; 4403 #undef IWN_EXP2 4404 } 4405 4406 static void 4407 iwn_update_mcast(struct ifnet *ifp) 4408 { 4409 /* Ignore */ 4410 } 4411 4412 static void 4413 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 4414 { 4415 struct iwn_cmd_led led; 4416 4417 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4418 4419 /* Clear microcode LED ownership. 
*/ 4420 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 4421 4422 led.which = which; 4423 led.unit = htole32(10000); /* on/off in unit of 100ms */ 4424 led.off = off; 4425 led.on = on; 4426 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 4427 } 4428 4429 /* 4430 * Set the critical temperature at which the firmware will stop the radio 4431 * and notify us. 4432 */ 4433 static int 4434 iwn_set_critical_temp(struct iwn_softc *sc) 4435 { 4436 struct iwn_critical_temp crit; 4437 int32_t temp; 4438 4439 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4440 4441 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 4442 4443 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 4444 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 4445 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 4446 temp = IWN_CTOK(110); 4447 else 4448 temp = 110; 4449 memset(&crit, 0, sizeof crit); 4450 crit.tempR = htole32(temp); 4451 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp); 4452 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 4453 } 4454 4455 static int 4456 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 4457 { 4458 struct iwn_cmd_timing cmd; 4459 uint64_t val, mod; 4460 4461 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4462 4463 memset(&cmd, 0, sizeof cmd); 4464 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); 4465 cmd.bintval = htole16(ni->ni_intval); 4466 cmd.lintval = htole16(10); 4467 4468 /* Compute remaining time until next beacon. 
*/ 4469 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 4470 mod = le64toh(cmd.tstamp) % val; 4471 cmd.binitval = htole32((uint32_t)(val - mod)); 4472 4473 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", 4474 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); 4475 4476 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 4477 } 4478 4479 static void 4480 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 4481 { 4482 struct ifnet *ifp = sc->sc_ifp; 4483 struct ieee80211com *ic = ifp->if_l2com; 4484 4485 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4486 4487 /* Adjust TX power if need be (delta >= 3 degC). */ 4488 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", 4489 __func__, sc->temp, temp); 4490 if (abs(temp - sc->temp) >= 3) { 4491 /* Record temperature of last calibration. */ 4492 sc->temp = temp; 4493 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1); 4494 } 4495 } 4496 4497 /* 4498 * Set TX power for current channel (each rate has its own power settings). 4499 * This function takes into account the regulatory information from EEPROM, 4500 * the current temperature and the current voltage. 4501 */ 4502 static int 4503 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 4504 int async) 4505 { 4506 /* Fixed-point arithmetic division using a n-bit fractional part. */ 4507 #define fdivround(a, b, n) \ 4508 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 4509 /* Linear interpolation. 
*/ 4510 #define interpolate(x, x1, y1, x2, y2, n) \ 4511 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 4512 4513 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 4514 struct iwn_ucode_info *uc = &sc->ucode_info; 4515 struct iwn4965_cmd_txpower cmd; 4516 struct iwn4965_eeprom_chan_samples *chans; 4517 const uint8_t *rf_gain, *dsp_gain; 4518 int32_t vdiff, tdiff; 4519 int i, c, grp, maxpwr; 4520 uint8_t chan; 4521 4522 /* Retrieve current channel from last RXON. */ 4523 chan = sc->rxon.chan; 4524 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", 4525 chan); 4526 4527 memset(&cmd, 0, sizeof cmd); 4528 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 4529 cmd.chan = chan; 4530 4531 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 4532 maxpwr = sc->maxpwr5GHz; 4533 rf_gain = iwn4965_rf_gain_5ghz; 4534 dsp_gain = iwn4965_dsp_gain_5ghz; 4535 } else { 4536 maxpwr = sc->maxpwr2GHz; 4537 rf_gain = iwn4965_rf_gain_2ghz; 4538 dsp_gain = iwn4965_dsp_gain_2ghz; 4539 } 4540 4541 /* Compute voltage compensation. */ 4542 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; 4543 if (vdiff > 0) 4544 vdiff *= 2; 4545 if (abs(vdiff) > 2) 4546 vdiff = 0; 4547 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4548 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 4549 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); 4550 4551 /* Get channel attenuation group. */ 4552 if (chan <= 20) /* 1-20 */ 4553 grp = 4; 4554 else if (chan <= 43) /* 34-43 */ 4555 grp = 0; 4556 else if (chan <= 70) /* 44-70 */ 4557 grp = 1; 4558 else if (chan <= 124) /* 71-124 */ 4559 grp = 2; 4560 else /* 125-200 */ 4561 grp = 3; 4562 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4563 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); 4564 4565 /* Get channel sub-band. 
*/ 4566 for (i = 0; i < IWN_NBANDS; i++) 4567 if (sc->bands[i].lo != 0 && 4568 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 4569 break; 4570 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 4571 return EINVAL; 4572 chans = sc->bands[i].chans; 4573 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4574 "%s: chan %d sub-band=%d\n", __func__, chan, i); 4575 4576 for (c = 0; c < 2; c++) { 4577 uint8_t power, gain, temp; 4578 int maxchpwr, pwr, ridx, idx; 4579 4580 power = interpolate(chan, 4581 chans[0].num, chans[0].samples[c][1].power, 4582 chans[1].num, chans[1].samples[c][1].power, 1); 4583 gain = interpolate(chan, 4584 chans[0].num, chans[0].samples[c][1].gain, 4585 chans[1].num, chans[1].samples[c][1].gain, 1); 4586 temp = interpolate(chan, 4587 chans[0].num, chans[0].samples[c][1].temp, 4588 chans[1].num, chans[1].samples[c][1].temp, 1); 4589 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4590 "%s: Tx chain %d: power=%d gain=%d temp=%d\n", 4591 __func__, c, power, gain, temp); 4592 4593 /* Compute temperature compensation. */ 4594 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 4595 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4596 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", 4597 __func__, tdiff, sc->temp, temp); 4598 4599 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 4600 /* Convert dBm to half-dBm. */ 4601 maxchpwr = sc->maxpwr[chan] * 2; 4602 if ((ridx / 8) & 1) 4603 maxchpwr -= 6; /* MIMO 2T: -3dB */ 4604 4605 pwr = maxpwr; 4606 4607 /* Adjust TX power based on rate. */ 4608 if ((ridx % 8) == 5) 4609 pwr -= 15; /* OFDM48: -7.5dB */ 4610 else if ((ridx % 8) == 6) 4611 pwr -= 17; /* OFDM54: -8.5dB */ 4612 else if ((ridx % 8) == 7) 4613 pwr -= 20; /* OFDM60: -10dB */ 4614 else 4615 pwr -= 10; /* Others: -5dB */ 4616 4617 /* Do not exceed channel max TX power. 
*/ 4618 if (pwr > maxchpwr) 4619 pwr = maxchpwr; 4620 4621 idx = gain - (pwr - power) - tdiff - vdiff; 4622 if ((ridx / 8) & 1) /* MIMO */ 4623 idx += (int32_t)le32toh(uc->atten[grp][c]); 4624 4625 if (cmd.band == 0) 4626 idx += 9; /* 5GHz */ 4627 if (ridx == IWN_RIDX_MAX) 4628 idx += 5; /* CCK */ 4629 4630 /* Make sure idx stays in a valid range. */ 4631 if (idx < 0) 4632 idx = 0; 4633 else if (idx > IWN4965_MAX_PWR_INDEX) 4634 idx = IWN4965_MAX_PWR_INDEX; 4635 4636 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4637 "%s: Tx chain %d, rate idx %d: power=%d\n", 4638 __func__, c, ridx, idx); 4639 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 4640 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 4641 } 4642 } 4643 4644 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, 4645 "%s: set tx power for chan %d\n", __func__, chan); 4646 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 4647 4648 #undef interpolate 4649 #undef fdivround 4650 } 4651 4652 static int 4653 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, 4654 int async) 4655 { 4656 struct iwn5000_cmd_txpower cmd; 4657 4658 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4659 4660 /* 4661 * TX power calibration is handled automatically by the firmware 4662 * for 5000 Series. 4663 */ 4664 memset(&cmd, 0, sizeof cmd); 4665 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 4666 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 4667 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 4668 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__); 4669 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 4670 } 4671 4672 /* 4673 * Retrieve the maximum RSSI (in dBm) among receivers. 
4674 */ 4675 static int 4676 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 4677 { 4678 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 4679 uint8_t mask, agc; 4680 int rssi; 4681 4682 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4683 4684 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 4685 agc = (le16toh(phy->agc) >> 7) & 0x7f; 4686 4687 rssi = 0; 4688 if (mask & IWN_ANT_A) 4689 rssi = MAX(rssi, phy->rssi[0]); 4690 if (mask & IWN_ANT_B) 4691 rssi = MAX(rssi, phy->rssi[2]); 4692 if (mask & IWN_ANT_C) 4693 rssi = MAX(rssi, phy->rssi[4]); 4694 4695 DPRINTF(sc, IWN_DEBUG_RECV, 4696 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc, 4697 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4], 4698 rssi - agc - IWN_RSSI_TO_DBM); 4699 return rssi - agc - IWN_RSSI_TO_DBM; 4700 } 4701 4702 static int 4703 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 4704 { 4705 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 4706 uint8_t agc; 4707 int rssi; 4708 4709 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4710 4711 agc = (le32toh(phy->agc) >> 9) & 0x7f; 4712 4713 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 4714 le16toh(phy->rssi[1]) & 0xff); 4715 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 4716 4717 DPRINTF(sc, IWN_DEBUG_RECV, 4718 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc, 4719 phy->rssi[0], phy->rssi[1], phy->rssi[2], 4720 rssi - agc - IWN_RSSI_TO_DBM); 4721 return rssi - agc - IWN_RSSI_TO_DBM; 4722 } 4723 4724 /* 4725 * Retrieve the average noise (in dBm) among receivers. 4726 */ 4727 static int 4728 iwn_get_noise(const struct iwn_rx_general_stats *stats) 4729 { 4730 int i, total, nbant, noise; 4731 4732 total = nbant = 0; 4733 for (i = 0; i < 3; i++) { 4734 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 4735 continue; 4736 total += noise; 4737 nbant++; 4738 } 4739 /* There should be at least one antenna but check anyway. */ 4740 return (nbant == 0) ? 
-127 : (total / nbant) - 107; 4741 } 4742 4743 /* 4744 * Compute temperature (in degC) from last received statistics. 4745 */ 4746 static int 4747 iwn4965_get_temperature(struct iwn_softc *sc) 4748 { 4749 struct iwn_ucode_info *uc = &sc->ucode_info; 4750 int32_t r1, r2, r3, r4, temp; 4751 4752 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4753 4754 r1 = le32toh(uc->temp[0].chan20MHz); 4755 r2 = le32toh(uc->temp[1].chan20MHz); 4756 r3 = le32toh(uc->temp[2].chan20MHz); 4757 r4 = le32toh(sc->rawtemp); 4758 4759 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 4760 return 0; 4761 4762 /* Sign-extend 23-bit R4 value to 32-bit. */ 4763 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 4764 /* Compute temperature in Kelvin. */ 4765 temp = (259 * (r4 - r2)) / (r3 - r1); 4766 temp = (temp * 97) / 100 + 8; 4767 4768 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, 4769 IWN_KTOC(temp)); 4770 return IWN_KTOC(temp); 4771 } 4772 4773 static int 4774 iwn5000_get_temperature(struct iwn_softc *sc) 4775 { 4776 int32_t temp; 4777 4778 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4779 4780 /* 4781 * Temperature is not used by the driver for 5000 Series because 4782 * TX power calibration is handled by firmware. 4783 */ 4784 temp = le32toh(sc->rawtemp); 4785 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 4786 temp = (temp / -5) + sc->temp_off; 4787 temp = IWN_KTOC(temp); 4788 } 4789 return temp; 4790 } 4791 4792 /* 4793 * Initialize sensitivity calibration state machine. 4794 */ 4795 static int 4796 iwn_init_sensitivity(struct iwn_softc *sc) 4797 { 4798 struct iwn_ops *ops = &sc->ops; 4799 struct iwn_calib_state *calib = &sc->calib; 4800 uint32_t flags; 4801 int error; 4802 4803 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4804 4805 /* Reset calibration state machine. */ 4806 memset(calib, 0, sizeof (*calib)); 4807 calib->state = IWN_CALIB_STATE_INIT; 4808 calib->cck_state = IWN_CCK_STATE_HIFA; 4809 /* Set initial correlation values. 
*/ 4810 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 4811 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 4812 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 4813 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 4814 calib->cck_x4 = 125; 4815 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 4816 calib->energy_cck = sc->limits->energy_cck; 4817 4818 /* Write initial sensitivity. */ 4819 if ((error = iwn_send_sensitivity(sc)) != 0) 4820 return error; 4821 4822 /* Write initial gains. */ 4823 if ((error = ops->init_gains(sc)) != 0) 4824 return error; 4825 4826 /* Request statistics at each beacon interval. */ 4827 flags = 0; 4828 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n", 4829 __func__); 4830 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 4831 } 4832 4833 /* 4834 * Collect noise and RSSI statistics for the first 20 beacons received 4835 * after association and use them to determine connected antennas and 4836 * to set differential gains. 4837 */ 4838 static void 4839 iwn_collect_noise(struct iwn_softc *sc, 4840 const struct iwn_rx_general_stats *stats) 4841 { 4842 struct iwn_ops *ops = &sc->ops; 4843 struct iwn_calib_state *calib = &sc->calib; 4844 struct ifnet *ifp = sc->sc_ifp; 4845 struct ieee80211com *ic = ifp->if_l2com; 4846 uint32_t val; 4847 int i; 4848 4849 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4850 4851 /* Accumulate RSSI and noise for all 3 antennas. */ 4852 for (i = 0; i < 3; i++) { 4853 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 4854 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 4855 } 4856 /* NB: We update differential gains only once after 20 beacons. */ 4857 if (++calib->nbeacons < 20) 4858 return; 4859 4860 /* Determine highest average RSSI. */ 4861 val = MAX(calib->rssi[0], calib->rssi[1]); 4862 val = MAX(calib->rssi[2], val); 4863 4864 /* Determine which antennas are connected. 
*/ 4865 sc->chainmask = sc->rxchainmask; 4866 for (i = 0; i < 3; i++) 4867 if (val - calib->rssi[i] > 15 * 20) 4868 sc->chainmask &= ~(1 << i); 4869 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4870 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", 4871 __func__, sc->rxchainmask, sc->chainmask); 4872 4873 /* If none of the TX antennas are connected, keep at least one. */ 4874 if ((sc->chainmask & sc->txchainmask) == 0) 4875 sc->chainmask |= IWN_LSB(sc->txchainmask); 4876 4877 (void)ops->set_gains(sc); 4878 calib->state = IWN_CALIB_STATE_RUN; 4879 4880 #ifdef notyet 4881 /* XXX Disable RX chains with no antennas connected. */ 4882 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 4883 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4884 #endif 4885 4886 /* Enable power-saving mode if requested by user. */ 4887 if (ic->ic_flags & IEEE80211_F_PMGTON) 4888 (void)iwn_set_pslevel(sc, 0, 3, 1); 4889 4890 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4891 4892 } 4893 4894 static int 4895 iwn4965_init_gains(struct iwn_softc *sc) 4896 { 4897 struct iwn_phy_calib_gain cmd; 4898 4899 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4900 4901 memset(&cmd, 0, sizeof cmd); 4902 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4903 /* Differential gains initially set to 0 for all 3 antennas. 
*/ 4904 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4905 "%s: setting initial differential gains\n", __func__); 4906 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4907 } 4908 4909 static int 4910 iwn5000_init_gains(struct iwn_softc *sc) 4911 { 4912 struct iwn_phy_calib cmd; 4913 4914 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4915 4916 memset(&cmd, 0, sizeof cmd); 4917 cmd.code = sc->reset_noise_gain; 4918 cmd.ngroups = 1; 4919 cmd.isvalid = 1; 4920 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4921 "%s: setting initial differential gains\n", __func__); 4922 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4923 } 4924 4925 static int 4926 iwn4965_set_gains(struct iwn_softc *sc) 4927 { 4928 struct iwn_calib_state *calib = &sc->calib; 4929 struct iwn_phy_calib_gain cmd; 4930 int i, delta, noise; 4931 4932 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4933 4934 /* Get minimal noise among connected antennas. */ 4935 noise = INT_MAX; /* NB: There's at least one antenna. */ 4936 for (i = 0; i < 3; i++) 4937 if (sc->chainmask & (1 << i)) 4938 noise = MIN(calib->noise[i], noise); 4939 4940 memset(&cmd, 0, sizeof cmd); 4941 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4942 /* Set differential gains for connected antennas. */ 4943 for (i = 0; i < 3; i++) { 4944 if (sc->chainmask & (1 << i)) { 4945 /* Compute attenuation (in unit of 1.5dB). */ 4946 delta = (noise - (int32_t)calib->noise[i]) / 30; 4947 /* NB: delta <= 0 */ 4948 /* Limit to [-4.5dB,0]. 
*/ 4949 cmd.gain[i] = MIN(abs(delta), 3); 4950 if (delta < 0) 4951 cmd.gain[i] |= 1 << 2; /* sign bit */ 4952 } 4953 } 4954 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4955 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4956 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); 4957 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4958 } 4959 4960 static int 4961 iwn5000_set_gains(struct iwn_softc *sc) 4962 { 4963 struct iwn_calib_state *calib = &sc->calib; 4964 struct iwn_phy_calib_gain cmd; 4965 int i, ant, div, delta; 4966 4967 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4968 4969 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4970 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4971 4972 memset(&cmd, 0, sizeof cmd); 4973 cmd.code = sc->noise_gain; 4974 cmd.ngroups = 1; 4975 cmd.isvalid = 1; 4976 /* Get first available RX antenna as referential. */ 4977 ant = IWN_LSB(sc->rxchainmask); 4978 /* Set differential gains for other antennas. */ 4979 for (i = ant + 1; i < 3; i++) { 4980 if (sc->chainmask & (1 << i)) { 4981 /* The delta is relative to antenna "ant". */ 4982 delta = ((int32_t)calib->noise[ant] - 4983 (int32_t)calib->noise[i]) / div; 4984 /* Limit to [-4.5dB,+4.5dB]. */ 4985 cmd.gain[i - 1] = MIN(abs(delta), 3); 4986 if (delta < 0) 4987 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 4988 } 4989 } 4990 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 4991 "setting differential gains Ant B/C: %x/%x (%x)\n", 4992 cmd.gain[0], cmd.gain[1], sc->chainmask); 4993 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4994 } 4995 4996 /* 4997 * Tune RF RX sensitivity based on the number of false alarms detected 4998 * during the last beacon period. 
4999 */ 5000 static void 5001 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 5002 { 5003 #define inc(val, inc, max) \ 5004 if ((val) < (max)) { \ 5005 if ((val) < (max) - (inc)) \ 5006 (val) += (inc); \ 5007 else \ 5008 (val) = (max); \ 5009 needs_update = 1; \ 5010 } 5011 #define dec(val, dec, min) \ 5012 if ((val) > (min)) { \ 5013 if ((val) > (min) + (dec)) \ 5014 (val) -= (dec); \ 5015 else \ 5016 (val) = (min); \ 5017 needs_update = 1; \ 5018 } 5019 5020 const struct iwn_sensitivity_limits *limits = sc->limits; 5021 struct iwn_calib_state *calib = &sc->calib; 5022 uint32_t val, rxena, fa; 5023 uint32_t energy[3], energy_min; 5024 uint8_t noise[3], noise_ref; 5025 int i, needs_update = 0; 5026 5027 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5028 5029 /* Check that we've been enabled long enough. */ 5030 if ((rxena = le32toh(stats->general.load)) == 0){ 5031 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__); 5032 return; 5033 } 5034 5035 /* Compute number of false alarms since last call for OFDM. */ 5036 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 5037 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 5038 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5039 5040 /* Save counters values for next call. */ 5041 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 5042 calib->fa_ofdm = le32toh(stats->ofdm.fa); 5043 5044 if (fa > 50 * rxena) { 5045 /* High false alarm count, decrease sensitivity. */ 5046 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5047 "%s: OFDM high false alarm count: %u\n", __func__, fa); 5048 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 5049 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 5050 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 5051 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 5052 5053 } else if (fa < 5 * rxena) { 5054 /* Low false alarm count, increase sensitivity. 
*/ 5055 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5056 "%s: OFDM low false alarm count: %u\n", __func__, fa); 5057 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 5058 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 5059 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 5060 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 5061 } 5062 5063 /* Compute maximum noise among 3 receivers. */ 5064 for (i = 0; i < 3; i++) 5065 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 5066 val = MAX(noise[0], noise[1]); 5067 val = MAX(noise[2], val); 5068 /* Insert it into our samples table. */ 5069 calib->noise_samples[calib->cur_noise_sample] = val; 5070 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 5071 5072 /* Compute maximum noise among last 20 samples. */ 5073 noise_ref = calib->noise_samples[0]; 5074 for (i = 1; i < 20; i++) 5075 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 5076 5077 /* Compute maximum energy among 3 receivers. */ 5078 for (i = 0; i < 3; i++) 5079 energy[i] = le32toh(stats->general.energy[i]); 5080 val = MIN(energy[0], energy[1]); 5081 val = MIN(energy[2], val); 5082 /* Insert it into our samples table. */ 5083 calib->energy_samples[calib->cur_energy_sample] = val; 5084 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 5085 5086 /* Compute minimum energy among last 10 samples. */ 5087 energy_min = calib->energy_samples[0]; 5088 for (i = 1; i < 10; i++) 5089 energy_min = MAX(energy_min, calib->energy_samples[i]); 5090 energy_min += 6; 5091 5092 /* Compute number of false alarms since last call for CCK. */ 5093 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 5094 fa += le32toh(stats->cck.fa) - calib->fa_cck; 5095 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5096 5097 /* Save counters values for next call. */ 5098 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 5099 calib->fa_cck = le32toh(stats->cck.fa); 5100 5101 if (fa > 50 * rxena) { 5102 /* High false alarm count, decrease sensitivity. 
*/ 5103 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5104 "%s: CCK high false alarm count: %u\n", __func__, fa); 5105 calib->cck_state = IWN_CCK_STATE_HIFA; 5106 calib->low_fa = 0; 5107 5108 if (calib->cck_x4 > 160) { 5109 calib->noise_ref = noise_ref; 5110 if (calib->energy_cck > 2) 5111 dec(calib->energy_cck, 2, energy_min); 5112 } 5113 if (calib->cck_x4 < 160) { 5114 calib->cck_x4 = 161; 5115 needs_update = 1; 5116 } else 5117 inc(calib->cck_x4, 3, limits->max_cck_x4); 5118 5119 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 5120 5121 } else if (fa < 5 * rxena) { 5122 /* Low false alarm count, increase sensitivity. */ 5123 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5124 "%s: CCK low false alarm count: %u\n", __func__, fa); 5125 calib->cck_state = IWN_CCK_STATE_LOFA; 5126 calib->low_fa++; 5127 5128 if (calib->cck_state != IWN_CCK_STATE_INIT && 5129 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 5130 calib->low_fa > 100)) { 5131 inc(calib->energy_cck, 2, limits->min_energy_cck); 5132 dec(calib->cck_x4, 3, limits->min_cck_x4); 5133 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 5134 } 5135 } else { 5136 /* Not worth to increase or decrease sensitivity. */ 5137 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5138 "%s: CCK normal false alarm count: %u\n", __func__, fa); 5139 calib->low_fa = 0; 5140 calib->noise_ref = noise_ref; 5141 5142 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 5143 /* Previous interval had many false alarms. 
 */
			dec(calib->energy_cck, 8, energy_min);
		}
		calib->cck_state = IWN_CCK_STATE_INIT;
	}

	if (needs_update)
		(void)iwn_send_sensitivity(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#undef dec
#undef inc
}

/*
 * Push the sensitivity thresholds kept in the software calibration state
 * (sc->calib) down to the firmware.  The legacy-sized sensitivity command
 * is sent unless the adapter advertises enhanced sensitivity
 * (IWN_FLAG_ENH_SENS), in which case the larger command with additional
 * detection parameters is used.  Returns the iwn_cmd() status.
 */
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_enhanced_sensitivity_cmd cmd;
	int len;

	/* Default to the legacy (non-enhanced) command length. */
	memset(&cmd, 0, sizeof cmd);
	len = sizeof (struct iwn_sensitivity_cmd);
	cmd.which = IWN_SENSITIVITY_WORKTBL;
	/* OFDM modulation. */
	cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
	cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
	cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
	cmd.energy_ofdm_th = htole16(62);
	/* CCK modulation. */
	cmd.corr_cck_x4 = htole16(calib->cck_x4);
	cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
	cmd.energy_cck = htole16(calib->energy_cck);
	/* Barker modulation: use default values. */
	cmd.corr_barker = htole16(190);
	cmd.corr_barker_mrc = htole16(390);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
	    calib->ofdm_mrc_x4, calib->cck_x4,
	    calib->cck_mrc_x4, calib->energy_cck);

	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
		goto send;
	/* Enhanced sensitivity settings. */
	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
	/*
	 * Fixed detection slope/intercept values; presumably taken from
	 * the vendor reference driver -- TODO(review) confirm.
	 */
	cmd.ofdm_det_slope_mrc = htole16(668);
	cmd.ofdm_det_icept_mrc = htole16(4);
	cmd.ofdm_det_slope = htole16(486);
	cmd.ofdm_det_icept = htole16(37);
	cmd.cck_det_slope_mrc = htole16(853);
	cmd.cck_det_icept_mrc = htole16(4);
	cmd.cck_det_slope = htole16(476);
	cmd.cck_det_icept = htole16(99);
send:
	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
}

/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 */
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
	struct iwn_pmgt_cmd cmd;
	const struct iwn_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__,
	    dtim,
	    level,
	    async);

	/* Select which PS parameters to use. */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	if (!(reg & 0x1))	/* L0s Entry disabled.
*/ 5241 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 5242 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 5243 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 5244 5245 if (dtim == 0) { 5246 dtim = 1; 5247 skip_dtim = 0; 5248 } else 5249 skip_dtim = pmgt->skip_dtim; 5250 if (skip_dtim != 0) { 5251 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 5252 max = pmgt->intval[4]; 5253 if (max == (uint32_t)-1) 5254 max = dtim * (skip_dtim + 1); 5255 else if (max > dtim) 5256 max = (max / dtim) * dtim; 5257 } else 5258 max = dtim; 5259 for (i = 0; i < 5; i++) 5260 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 5261 5262 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 5263 level); 5264 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 5265 } 5266 5267 static int 5268 iwn_send_btcoex(struct iwn_softc *sc) 5269 { 5270 struct iwn_bluetooth cmd; 5271 5272 memset(&cmd, 0, sizeof cmd); 5273 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 5274 cmd.lead_time = IWN_BT_LEAD_TIME_DEF; 5275 cmd.max_kill = IWN_BT_MAX_KILL_DEF; 5276 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 5277 __func__); 5278 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 5279 } 5280 5281 static int 5282 iwn_send_advanced_btcoex(struct iwn_softc *sc) 5283 { 5284 static const uint32_t btcoex_3wire[12] = { 5285 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 5286 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 5287 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, 5288 }; 5289 struct iwn6000_btcoex_config btconfig; 5290 struct iwn_btcoex_priotable btprio; 5291 struct iwn_btcoex_prot btprot; 5292 int error, i; 5293 5294 memset(&btconfig, 0, sizeof btconfig); 5295 btconfig.flags = 145; 5296 btconfig.max_kill = 5; 5297 btconfig.bt3_t7_timer = 1; 5298 btconfig.kill_ack = htole32(0xffff0000); 5299 btconfig.kill_cts = htole32(0xffff0000); 5300 btconfig.sample_time = 2; 5301 btconfig.bt3_t2_timer = 0xc; 5302 for (i = 0; i < 12; i++) 5303 
btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 5304 btconfig.valid = htole16(0xff); 5305 btconfig.prio_boost = 0xf0; 5306 DPRINTF(sc, IWN_DEBUG_RESET, 5307 "%s: configuring advanced bluetooth coexistence\n", __func__); 5308 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1); 5309 if (error != 0) 5310 return error; 5311 5312 memset(&btprio, 0, sizeof btprio); 5313 btprio.calib_init1 = 0x6; 5314 btprio.calib_init2 = 0x7; 5315 btprio.calib_periodic_low1 = 0x2; 5316 btprio.calib_periodic_low2 = 0x3; 5317 btprio.calib_periodic_high1 = 0x4; 5318 btprio.calib_periodic_high2 = 0x5; 5319 btprio.dtim = 0x6; 5320 btprio.scan52 = 0x8; 5321 btprio.scan24 = 0xa; 5322 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 5323 1); 5324 if (error != 0) 5325 return error; 5326 5327 /* Force BT state machine change. */ 5328 memset(&btprot, 0, sizeof btprio); 5329 btprot.open = 1; 5330 btprot.type = 1; 5331 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 5332 if (error != 0) 5333 return error; 5334 btprot.open = 0; 5335 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 5336 } 5337 5338 static int 5339 iwn5000_runtime_calib(struct iwn_softc *sc) 5340 { 5341 struct iwn5000_calib_config cmd; 5342 5343 memset(&cmd, 0, sizeof cmd); 5344 cmd.ucode.once.enable = 0xffffffff; 5345 cmd.ucode.once.start = IWN5000_CALIB_DC; 5346 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5347 "%s: configuring runtime calibration\n", __func__); 5348 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 5349 } 5350 5351 static int 5352 iwn_config(struct iwn_softc *sc) 5353 { 5354 struct iwn_ops *ops = &sc->ops; 5355 struct ifnet *ifp = sc->sc_ifp; 5356 struct ieee80211com *ic = ifp->if_l2com; 5357 uint32_t txmask; 5358 uint16_t rxchain; 5359 int error; 5360 5361 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5362 5363 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 5364 /* Set radio temperature sensor offset. 
 */
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return error;
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
		/*
		 * NOTE(review): txmask is converted to little-endian here,
		 * so the debug printf below shows the LE value.
		 */
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
		error = iwn_send_advanced_btcoex(sc);
	else
		error = iwn_send_btcoex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX. */
	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
	IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
	/* wlap set to our own MAC as well -- TODO(review) confirm semantics. */
	IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon.mode = IWN_MODE_STA;
		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon.mode = IWN_MODE_MONITOR;
		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
		break;
	default:
		/* Should not get there. */
		break;
	}
	sc->rxon.cck_mask = 0x0f;	/* not yet negotiated */
	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon.ht_single_mask = 0xff;
	sc->rxon.ht_dual_mask = 0xff;
	sc->rxon.ht_triple_mask = 0xff;
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(2) |
	    IWN_RXCHAIN_IDLE_COUNT(2);
	sc->rxon.rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed\n",
		    __func__);
		return error;
	}

	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
		    __func__);
		return error;
	}

	/* Configuration has changed, set TX power accordingly.
*/ 5458 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) { 5459 device_printf(sc->sc_dev, "%s: could not set TX power\n", 5460 __func__); 5461 return error; 5462 } 5463 5464 if ((error = iwn_set_critical_temp(sc)) != 0) { 5465 device_printf(sc->sc_dev, 5466 "%s: could not set critical temperature\n", __func__); 5467 return error; 5468 } 5469 5470 /* Set power saving level to CAM during initialization. */ 5471 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 5472 device_printf(sc->sc_dev, 5473 "%s: could not set power saving level\n", __func__); 5474 return error; 5475 } 5476 5477 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5478 5479 return 0; 5480 } 5481 5482 /* 5483 * Add an ssid element to a frame. 5484 */ 5485 static uint8_t * 5486 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) 5487 { 5488 *frm++ = IEEE80211_ELEMID_SSID; 5489 *frm++ = len; 5490 memcpy(frm, ssid, len); 5491 return frm + len; 5492 } 5493 5494 static int 5495 iwn_scan(struct iwn_softc *sc) 5496 { 5497 struct ifnet *ifp = sc->sc_ifp; 5498 struct ieee80211com *ic = ifp->if_l2com; 5499 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/ 5500 struct ieee80211_node *ni = ss->ss_vap->iv_bss; 5501 struct iwn_scan_hdr *hdr; 5502 struct iwn_cmd_data *tx; 5503 struct iwn_scan_essid *essid; 5504 struct iwn_scan_chan *chan; 5505 struct ieee80211_frame *wh; 5506 struct ieee80211_rateset *rs; 5507 struct ieee80211_channel *c; 5508 uint8_t *buf, *frm; 5509 uint16_t rxchain; 5510 uint8_t txant; 5511 int buflen, error; 5512 5513 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5514 5515 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 5516 if (buf == NULL) { 5517 device_printf(sc->sc_dev, 5518 "%s: could not allocate buffer for scan command\n", 5519 __func__); 5520 return ENOMEM; 5521 } 5522 hdr = (struct iwn_scan_hdr *)buf; 5523 /* 5524 * Move to the next channel if no frames are received within 10ms 5525 * after sending the probe request. 
 */
	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
	hdr->quiet_threshold = htole16(1);	/* min # of packets */

	/* Select antennas for scanning. */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
	    IWN_RXCHAIN_DRIVER_FORCE;
	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Ant A must be avoided in 5GHz because of an HW bug. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
	} else	/* Use all available RX antennas. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
	hdr->rxchain = htole16(rxchain);
	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);

	/* TX command for the probe request follows the scan header. */
	tx = (struct iwn_cmd_data *)(hdr + 1);
	tx->flags = htole32(IWN_TX_AUTO_SEQ);
	tx->id = sc->broadcast_id;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
		/* Send probe requests at 6Mbps. */
		tx->rate = htole32(0xd);
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
	} else {
		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
		    sc->rxon.associd && sc->rxon.chan > 14)
			tx->rate = htole32(0xd);
		else {
			/* Send probe requests at 1Mbps. */
			tx->rate = htole32(10 | IWN_RFLAG_CCK);
		}
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	}
	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);
	tx->rate |= htole32(IWN_RFLAG_ANT(txant));

	/* Only the first ESSID slot is filled in. */
	essid = (struct iwn_scan_essid *)(tx + 1);
	if (ss->ss_ssid[0].len != 0) {
		essid[0].id = IEEE80211_ELEMID_SSID;
		essid[0].len = ss->ss_ssid[0].len;
		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
	}
	/*
	 * Build a probe request frame. Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	/*
	 * The probe request template starts after the 20 ESSID slots --
	 * TODO(review) confirm this count against the scan command layout.
	 */
	wh = (struct ieee80211_frame *)(essid + 20);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, NULL, 0);
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	if (ic->ic_htcaps & IEEE80211_HTC_HT)
		frm = ieee80211_add_htcap(frm, ni);

	/* Set length of probe request. */
	tx->len = htole16(frm - (uint8_t *)wh);

	c = ic->ic_curchan;
	chan = (struct iwn_scan_chan *)frm;
	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
	chan->flags = 0;
	if (ss->ss_nssid > 0)
		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
	chan->dsp_gain = 0x6e;
	if (IEEE80211_IS_CHAN_5GHZ(c) &&
	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
		chan->rf_gain = 0x3b;
		chan->active = htole16(24);
		chan->passive = htole16(110);
		chan->flags |= htole32(IWN_CHAN_ACTIVE);
	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
		chan->rf_gain = 0x3b;
		chan->active = htole16(24);
		if (sc->rxon.associd)
			chan->passive = htole16(78);
		else
			chan->passive = htole16(110);
		hdr->crc_threshold = 0xffff;
	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
		chan->rf_gain = 0x28;
		chan->active = htole16(36);
		chan->passive = htole16(120);
		chan->flags |= htole32(IWN_CHAN_ACTIVE);
	} else {
		chan->rf_gain = 0x28;
		chan->active = htole16(36);
		if (sc->rxon.associd)
			chan->passive = htole16(88);
		else
			chan->passive = htole16(120);
		hdr->crc_threshold = 0xffff;
	}

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: chan %u flags 0x%x rf_gain 0x%x "
	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
	    chan->active, chan->passive);

	hdr->nchan++;
	chan++;
	buflen = (uint8_t *)chan - buf;
	hdr->len = htole16(buflen);

	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
	    hdr->nchan);
	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
	free(buf, M_DEVBUF);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * Configure the adapter for the AUTH state: update RXON with the BSS
 * address, channel and slot/preamble flags, reprogram TX power and
 * re-add the broadcast node (reconfiguring RXON clears the firmware
 * nodes table).
 */
static int
iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon.cck_mask = 0;
		sc->rxon.ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon.cck_mask = 0x03;
		sc->rxon.ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon.cck_mask = 0x0f;
		sc->rxon.ofdm_mask = 0x15;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
	    sc->rxon.ofdm_mask);
	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
		    __func__, error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}
	/*
	 * Reconfiguring RXON clears the firmware nodes table so we must
	 * add the broadcast node again.
	 */
	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add broadcast node, error %d\n", __func__,
		    error);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Configure the adapter for the RUN state: set timing, update RXON with
 * association data and HT flags, add the BSS node, set link quality,
 * initialize sensitivity and start the periodic calibration timer.
 */
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_node_info node;
	uint32_t htflags = 0;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Link LED blinks while monitoring. */
		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
		return 0;
	}
	if ((error = iwn_set_timing(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set timing, error %d\n", __func__, error);
		return error;
	}

	/* Update adapter configuration.
 */
	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon.cck_mask = 0;
		sc->rxon.ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon.cck_mask = 0x03;
		sc->rxon.ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon.cck_mask = 0x0f;
		sc->rxon.ofdm_mask = 0x15;
	}
	/* Derive HT protection/width flags from the current channel. */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			switch (ic->ic_curhtprotmode) {
			case IEEE80211_HTINFO_OPMODE_HT20PR:
				htflags |= IWN_RXON_HT_MODEPURE40;
				break;
			default:
				htflags |= IWN_RXON_HT_MODEMIXED;
				break;
			}
		}
		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
			htflags |= IWN_RXON_HT_HT40MINUS;
	}
	sc->rxon.flags |= htole32(htflags);
	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
	    sc->rxon.chan, sc->rxon.flags);
	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not update configuration, error %d\n", __func__,
		    error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}

	/* Fake a join to initialize the TX rate. */
	((struct iwn_node *)ni)->id = IWN_ID_BSS;
	iwn_newassoc(ni, 1);

	/* Add BSS node. */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		/* Honour the peer's spatial-multiplexing power save mode. */
		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
		case IEEE80211_HTCAP_SMPS_ENA:
			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
			break;
		case IEEE80211_HTCAP_SMPS_DYNAMIC:
			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
			break;
		}
		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
		    IWN_AMDPU_DENSITY(5));	/* 4us */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
			node.htflags |= htole32(IWN_NODE_HT40);
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
	error = ops->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add BSS node, error %d\n", __func__, error);
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
	    __func__, node.id);
	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup link quality for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	if ((error = iwn_init_sensitivity(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n", __func__,
		    error);
		return error;
	}
	/* Start periodic calibration timer (500ms interval). */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	sc->calib_cnt = 0;
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);

	/* Link LED always on while associated.
*/ 5851 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 5852 5853 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5854 5855 return 0; 5856 } 5857 5858 /* 5859 * This function is called by upper layer when an ADDBA request is received 5860 * from another STA and before the ADDBA response is sent. 5861 */ 5862 static int 5863 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 5864 int baparamset, int batimeout, int baseqctl) 5865 { 5866 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 5867 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5868 struct iwn_ops *ops = &sc->ops; 5869 struct iwn_node *wn = (void *)ni; 5870 struct iwn_node_info node; 5871 uint16_t ssn; 5872 uint8_t tid; 5873 int error; 5874 5875 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5876 5877 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 5878 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 5879 5880 memset(&node, 0, sizeof node); 5881 node.id = wn->id; 5882 node.control = IWN_NODE_UPDATE; 5883 node.flags = IWN_FLAG_SET_ADDBA; 5884 node.addba_tid = tid; 5885 node.addba_ssn = htole16(ssn); 5886 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 5887 wn->id, tid, ssn); 5888 error = ops->add_node(sc, &node, 1); 5889 if (error != 0) 5890 return error; 5891 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 5892 #undef MS 5893 } 5894 5895 /* 5896 * This function is called by upper layer on teardown of an HT-immediate 5897 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
 */
static void
iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	uint8_t tid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* XXX: tid as an argument */
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		if (&ni->ni_rx_ampdu[tid] == rap)
			break;
	}

	/* Tell the firmware to tear down the RX aggregation session. */
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DELBA;
	node.delba_tid = tid;
	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
	(void)ops->add_node(sc, &node, 1);
	sc->sc_ampdu_rx_stop(ni, rap);
}

/*
 * Called by net80211 before transmitting an ADDBA request: reserve a free
 * aggregation TX queue and record its id in tap->txa_private, then defer
 * to the saved net80211 handler.  Returns 0 (request not sent) if no
 * queue or memory is available.
 */
static int
iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
		if (sc->qid2tap[qid] == NULL)
			break;
	}
	if (qid == sc->ntxqs) {
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
		    __func__);
		return 0;
	}
	tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (tap->txa_private == NULL) {
		device_printf(sc->sc_dev,
		    "%s: failed to alloc TX aggregation structure\n", __func__);
		return 0;
	}
	sc->qid2tap[qid] = tap;
	*(int *)tap->txa_private = qid;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}

/*
 * Called by net80211 when the peer's ADDBA response arrives: on success
 * start TX aggregation on the reserved queue, otherwise release the
 * queue reservation made in iwn_addba_request().
 */
static int
iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int qid = *(int *)tap->txa_private;
	uint8_t tid = tap->txa_tid;
	int ret;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (code == IEEE80211_STATUS_SUCCESS) {
		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
		if (ret != 1)
			return ret;
	} else {
		sc->qid2tap[qid] = NULL;
		free(tap->txa_private, M_DEVBUF);
		tap->txa_private = NULL;
	}
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}

/*
 * This function is called by upper layer when an ADDBA response is received
 * from another STA.
 */
static int
iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Enable TX for the specified RA/TID.
 */
	wn->disable_tid &= ~(1 << tid);
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DISABLE_TID;
	node.disable_tid = htole16(wn->disable_tid);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return 0;

	if ((error = iwn_nic_lock(sc)) != 0)
		return 0;
	qid = *(int *)tap->txa_private;
	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
	    __func__, wn->id, tid, tap->txa_start, qid);
	/* Program the chip-specific scheduler for the aggregation queue. */
	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);

	iwn_set_link_quality(sc, ni);
	return 1;
}

/*
 * Tear down a TX aggregation session: notify net80211, then release the
 * scheduler queue once it has drained.
 */
static void
iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	uint8_t tid = tap->txa_tid;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	sc->sc_addba_stop(ni, tap);

	if (tap->txa_private == NULL)
		return;

	qid = *(int *)tap->txa_private;
	/*
	 * NOTE(review): if frames are still queued (or the NIC lock cannot
	 * be taken) we return without freeing txa_private or clearing
	 * qid2tap; presumably this is retried later -- confirm.
	 */
	if (sc->txq[qid].queued != 0)
		return;
	if (iwn_nic_lock(sc) != 0)
		return;
	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);
	sc->qid2tap[qid] = NULL;
	free(tap->txa_private, M_DEVBUF);
	tap->txa_private = NULL;
}

/*
 * Program the 4965 TX scheduler to run an aggregation session on the
 * given queue for this RA/TID pair, starting at ssn.
 */
static void
iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	struct iwn_node *wn = (void *)ni;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
	    IWN_SCHED_WINSZ);
	/* Set scheduler frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
	    iwn_tid2fifo[tid] << 1);
}

/*
 * Undo iwn4965_ampdu_tx_start(): return the queue to non-aggregation
 * (inactive) state.
 */
static void
iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive.
*/ 6105 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6106 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 6107 } 6108 6109 static void 6110 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 6111 int qid, uint8_t tid, uint16_t ssn) 6112 { 6113 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6114 6115 struct iwn_node *wn = (void *)ni; 6116 6117 /* Stop TX scheduler while we're changing its configuration. */ 6118 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6119 IWN5000_TXQ_STATUS_CHGACT); 6120 6121 /* Assign RA/TID translation to the queue. */ 6122 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 6123 wn->id << 4 | tid); 6124 6125 /* Enable chain-building mode for the queue. */ 6126 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 6127 6128 /* Enable aggregation for the queue. */ 6129 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 6130 6131 /* Set starting sequence number from the ADDBA request. */ 6132 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 6133 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 6134 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 6135 6136 /* Set scheduler window size and frame limit. */ 6137 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 6138 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 6139 6140 /* Enable interrupts for the queue. */ 6141 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 6142 6143 /* Mark the queue as active. */ 6144 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6145 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 6146 } 6147 6148 static void 6149 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 6150 { 6151 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6152 6153 /* Stop TX scheduler while we're changing its configuration. 
 */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Disable aggregation for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
}

/*
 * Query calibration tables from the initialization firmware.  We do this
 * only once at first boot.  Called from a process context.
 */
static int
iwn5000_query_calibration(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;
	int error;

	/* Request that every calibration be run and reported once. */
	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start = 0xffffffff;
	cmd.ucode.once.send = 0xffffffff;
	cmd.ucode.flags = 0xffffffff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
	    __func__);
	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
	if (error != 0)
		return error;

	/*
	 * Wait at most two seconds for calibration to complete; the results
	 * arrive asynchronously and set IWN_FLAG_CALIB_DONE (msleep drops
	 * sc_mtx while sleeping).
	 */
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
	return error;
}

/*
 * Send calibration results to the runtime firmware.  These results were
 * obtained on first boot from the initialization firmware.
 */
static int
iwn5000_send_calibration(struct iwn_softc *sc)
{
	int idx, error;

	for (idx = 0; idx < 5; idx++) {
		if (sc->calibcmd[idx].buf == NULL)
			continue;	/* No results available.
*/ 6211 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 6212 "send calibration result idx=%d len=%d\n", idx, 6213 sc->calibcmd[idx].len); 6214 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 6215 sc->calibcmd[idx].len, 0); 6216 if (error != 0) { 6217 device_printf(sc->sc_dev, 6218 "%s: could not send calibration result, error %d\n", 6219 __func__, error); 6220 return error; 6221 } 6222 } 6223 return 0; 6224 } 6225 6226 static int 6227 iwn5000_send_wimax_coex(struct iwn_softc *sc) 6228 { 6229 struct iwn5000_wimax_coex wimax; 6230 6231 #ifdef notyet 6232 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 6233 /* Enable WiMAX coexistence for combo adapters. */ 6234 wimax.flags = 6235 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 6236 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 6237 IWN_WIMAX_COEX_STA_TABLE_VALID | 6238 IWN_WIMAX_COEX_ENABLE; 6239 memcpy(wimax.events, iwn6050_wimax_events, 6240 sizeof iwn6050_wimax_events); 6241 } else 6242 #endif 6243 { 6244 /* Disable WiMAX coexistence. */ 6245 wimax.flags = 0; 6246 memset(wimax.events, 0, sizeof wimax.events); 6247 } 6248 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 6249 __func__); 6250 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 6251 } 6252 6253 static int 6254 iwn5000_crystal_calib(struct iwn_softc *sc) 6255 { 6256 struct iwn5000_phy_calib_crystal cmd; 6257 6258 memset(&cmd, 0, sizeof cmd); 6259 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 6260 cmd.ngroups = 1; 6261 cmd.isvalid = 1; 6262 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 6263 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 6264 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", 6265 cmd.cap_pin[0], cmd.cap_pin[1]); 6266 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 6267 } 6268 6269 static int 6270 iwn5000_temp_offset_calib(struct iwn_softc *sc) 6271 { 6272 struct iwn5000_phy_calib_temp_offset cmd; 6273 6274 memset(&cmd, 0, sizeof cmd); 6275 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 6276 
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Prefer the EEPROM-provided offset; fall back to the default. */
	if (sc->eeprom_temp != 0)
		cmd.offset = htole16(sc->eeprom_temp);
	else
		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
	    le16toh(cmd.offset));
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).
 */
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Disable chain mode for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);

	/* Reset read/write pointers and scheduler window for every queue. */
	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		/* Set scheduler window size. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
		/* Set scheduler frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16);
	}

	/* Enable interrupts for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
	/* Identify TX FIFO rings (0-7).
 */
	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	for (qid = 0; qid < 7; qid++) {
		/* Maps TX ring qid to its hardware FIFO number. */
		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
	}
	iwn_nic_unlock(sc);
	return 0;
}

/*
 * This function is called after the initialization or runtime firmware
 * notifies us of its readiness (called in a process context).
 */
static int
iwn5000_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Switch to using ICT interrupt mode. */
	iwn5000_ict_reset(sc);

	if ((error = iwn_nic_lock(sc)) != 0) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/*
	 * Enable chain mode for all queues, except command queue
	 * (0xfffef leaves bit 4, the command queue, clear).
	 */
	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);

	/* Reset read/write pointers and scheduler window for every queue. */
	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit.
 */
		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
	}

	/* Enable interrupts for all our 20 queues. */
	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	for (qid = 0; qid < 7; qid++) {
		/* Maps TX ring qid to its hardware FIFO number. */
		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
	}
	iwn_nic_unlock(sc);

	/* Configure WiMAX coexistence for combo adapters. */
	error = iwn5000_send_wimax_coex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure WiMAX coexistence, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
		/* Perform crystal calibration. */
		error = iwn5000_crystal_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: crystal calibration failed, error %d\n",
			    __func__, error);
			return error;
		}
	}
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
		/* Query calibration from the initialization firmware. */
		if ((error = iwn5000_query_calibration(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not query calibration, error %d\n",
			    __func__, error);
			return error;
		}
		/*
		 * We have the calibration results now, reboot with the
		 * runtime firmware (call ourselves recursively!)
		 */
		iwn_hw_stop(sc);
		error = iwn_hw_init(sc);
	} else {
		/* Send calibration results to runtime firmware.
 */
		error = iwn5000_send_calibration(sc);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * The firmware boot code is small and is intended to be copied directly into
 * the NIC internal memory (no DMA transfer).
 */
static int
iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
{
	int error, ntries;

	/* The BSM transfers the image one 32-bit word at a time. */
	size /= sizeof (uint32_t);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Copy microcode image into NIC memory. */
	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
	    (const uint32_t *)ucode, size);

	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);

	/* Start boot load now. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);

	/* Wait for transfer to complete (up to ~10ms). */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
		    IWN_BSM_WR_CTRL_START))
			break;
		DELAY(10);
	}
	if (ntries == 1000) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		iwn_nic_unlock(sc);
		return ETIMEDOUT;
	}

	/* Enable boot after power up. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);

	iwn_nic_unlock(sc);
	return 0;
}

/*
 * Load the 4965 firmware: run the initialization image first (it reports
 * the temperature used for initial TX power calibration), then hand the
 * BSM the runtime image.
 */
static int
iwn4965_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy initialization sections into pre-allocated DMA-safe memory.
 */
	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	/* .text goes at a fixed offset past the maximum .data size. */
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->init.text, fw->init.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find initialization sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
	iwn_nic_unlock(sc);

	/* Load firmware boot code. */
	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		return error;
	}
	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);

	/* Wait at most one second for first alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	/* Retrieve current temperature for initial TX power calibration. */
	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
	sc->temp = iwn4965_get_temperature(sc);

	/* Copy runtime sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->main.text, fw->main.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find runtime sections.
 */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	/* IWN_FW_UPDATED marks this as the runtime (not init) image. */
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
	    IWN_FW_UPDATED | fw->main.textsz);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Transfer one firmware section to NIC internal memory at address `dst'
 * using the Flow Handler's service DMA channel, then sleep until the
 * transfer-complete interrupt wakes us.
 */
static int
iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
    const uint8_t *section, int size)
{
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, size);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Pause the channel while we program the transfer descriptor. */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_PAUSE);

	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
	    IWN_LOADDR(dma->paddr));
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
	    IWN_HIADDR(dma->paddr) << 28 | size);
	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
	    IWN_FH_TXBUF_STATUS_TFBD_VALID);

	/* Kick Flow Handler to start DMA transfer. */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);

	iwn_nic_unlock(sc);

	/* Wait at most five seconds for FH DMA transfer to complete.
 */
	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
}

/*
 * Load 5000-series firmware: upload the .text and .data sections of either
 * the initialization image (first boot) or the runtime image, then release
 * the on-board CPU from reset.
 */
static int
iwn5000_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_part *fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Load the initialization firmware on first boot only. */
	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
	    &sc->fw.main : &sc->fw.init;

	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
	    fw->text, fw->textsz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".text", error);
		return error;
	}
	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
	    fw->data, fw->datasz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".data", error);
		return error;
	}

	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);
	return 0;
}

/*
 * Extract text and data sections from a legacy firmware image.
 */
static int
iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
{
	const uint32_t *ptr;
	size_t hdrlen = 24;
	uint32_t rev;

	/* First word of the image is the firmware revision/API version. */
	ptr = (const uint32_t *)fw->data;
	rev = le32toh(*ptr++);

	/* Check firmware API version. */
	if (IWN_FW_API(rev) <= 1) {
		device_printf(sc->sc_dev,
		    "%s: bad firmware, need API version >=2\n", __func__);
		return EINVAL;
	}
	if (IWN_FW_API(rev) >= 3) {
		/* Skip build number (version 2 header).
 */
		hdrlen += 4;
		ptr++;
	}
	if (fw->size < hdrlen) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	/* The header ends with the five section sizes, in this order. */
	fw->main.textsz = le32toh(*ptr++);
	fw->main.datasz = le32toh(*ptr++);
	fw->init.textsz = le32toh(*ptr++);
	fw->init.datasz = le32toh(*ptr++);
	fw->boot.textsz = le32toh(*ptr++);

	/*
	 * Check that all firmware sections fit.
	 * NOTE(review): on 32-bit platforms this sum of five uint32_t values
	 * could in principle wrap around size_t — confirm whether an
	 * explicit overflow check is warranted for hostile images.
	 */
	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}

	/* Get pointers to firmware sections (laid out back to back). */
	fw->main.text = (const uint8_t *)ptr;
	fw->main.data = fw->main.text + fw->main.textsz;
	fw->init.text = fw->main.data + fw->main.datasz;
	fw->init.data = fw->init.text + fw->init.textsz;
	fw->boot.text = fw->init.data + fw->init.datasz;
	return 0;
}

/*
 * Extract text and data sections from a TLV firmware image.
6682 */ 6683 static int 6684 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 6685 uint16_t alt) 6686 { 6687 const struct iwn_fw_tlv_hdr *hdr; 6688 const struct iwn_fw_tlv *tlv; 6689 const uint8_t *ptr, *end; 6690 uint64_t altmask; 6691 uint32_t len, tmp; 6692 6693 if (fw->size < sizeof (*hdr)) { 6694 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 6695 __func__, fw->size); 6696 return EINVAL; 6697 } 6698 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 6699 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 6700 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 6701 __func__, le32toh(hdr->signature)); 6702 return EINVAL; 6703 } 6704 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 6705 le32toh(hdr->build)); 6706 6707 /* 6708 * Select the closest supported alternative that is less than 6709 * or equal to the specified one. 6710 */ 6711 altmask = le64toh(hdr->altmask); 6712 while (alt > 0 && !(altmask & (1ULL << alt))) 6713 alt--; /* Downgrade. */ 6714 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 6715 6716 ptr = (const uint8_t *)(hdr + 1); 6717 end = (const uint8_t *)(fw->data + fw->size); 6718 6719 /* Parse type-length-value fields. */ 6720 while (ptr + sizeof (*tlv) <= end) { 6721 tlv = (const struct iwn_fw_tlv *)ptr; 6722 len = le32toh(tlv->len); 6723 6724 ptr += sizeof (*tlv); 6725 if (ptr + len > end) { 6726 device_printf(sc->sc_dev, 6727 "%s: firmware too short: %zu bytes\n", __func__, 6728 fw->size); 6729 return EINVAL; 6730 } 6731 /* Skip other alternatives. 
 */
		if (tlv->alt != 0 && tlv->alt != htole16(alt))
			goto next;

		switch (le16toh(tlv->type)) {
		case IWN_FW_TLV_MAIN_TEXT:
			fw->main.text = ptr;
			fw->main.textsz = len;
			break;
		case IWN_FW_TLV_MAIN_DATA:
			fw->main.data = ptr;
			fw->main.datasz = len;
			break;
		case IWN_FW_TLV_INIT_TEXT:
			fw->init.text = ptr;
			fw->init.textsz = len;
			break;
		case IWN_FW_TLV_INIT_DATA:
			fw->init.data = ptr;
			fw->init.datasz = len;
			break;
		case IWN_FW_TLV_BOOT_TEXT:
			fw->boot.text = ptr;
			fw->boot.textsz = len;
			break;
		case IWN_FW_TLV_ENH_SENS:
			if (!len)
				sc->sc_flags |= IWN_FLAG_ENH_SENS;
			break;
		case IWN_FW_TLV_PHY_CALIB:
			/*
			 * NOTE(review): this dereferences only the first
			 * byte of the TLV payload and then applies htole32
			 * to it; a full 32-bit little-endian read converted
			 * with le32toh looks like the intent — confirm
			 * against the TLV firmware format before changing.
			 */
			tmp = htole32(*ptr);
			if (tmp < 253) {
				sc->reset_noise_gain = tmp;
				sc->noise_gain = tmp + 1;
			}
			break;
		default:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d not handled\n", le16toh(tlv->type));
			break;
		}
 next:		/* TLV fields are 32-bit aligned. */
		ptr += (len + 3) & ~3;
	}
	return 0;
}

/*
 * Load the firmware image from the filesystem and locate its text/data
 * sections (legacy or TLV format).
 */
static int
iwn_read_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Drop the driver lock across firmware_get(); it may sleep. */
	IWN_UNLOCK(sc);

	memset(fw, 0, sizeof (*fw));

	/* Read firmware image from filesystem.
 */
	sc->fw_fp = firmware_get(sc->fwname);
	if (sc->fw_fp == NULL) {
		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
		    __func__, sc->fwname);
		IWN_LOCK(sc);
		return EINVAL;
	}
	IWN_LOCK(sc);

	fw->size = sc->fw_fp->datasize;
	fw->data = (const uint8_t *)sc->fw_fp->data;
	/* Need at least the format-discriminating first word. */
	if (fw->size < sizeof (uint32_t)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* Retrieve text and data sections. */
	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
		error = iwn_read_firmware_leg(sc, fw);
	else
		error = iwn_read_firmware_tlv(sc, fw, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware sections, error %d\n",
		    __func__, error);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return error;
	}

	/* Make sure text and data sections fit in hardware memory. */
	if (fw->main.textsz > sc->fw_text_maxsz ||
	    fw->main.datasz > sc->fw_data_maxsz ||
	    fw->init.textsz > sc->fw_text_maxsz ||
	    fw->init.datasz > sc->fw_data_maxsz ||
	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
	    (fw->boot.textsz & 3) != 0) {
		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
		    __func__);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* We can proceed with loading the firmware. */
	return 0;
}

/*
 * Request the MAC clock and poll until the hardware reports that it has
 * stabilized.
 */
static int
iwn_clock_wait(struct iwn_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization.
 */
	for (ntries = 0; ntries < 2500; ntries++) {
		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
			return 0;
		DELAY(10);
	}
	device_printf(sc->sc_dev,
	    "%s: timeout waiting for clock stabilization\n", __func__);
	return ETIMEDOUT;
}

/*
 * Power on the adapter (APMG) and apply the PCIe/power-management
 * workarounds required before peripheral registers can be accessed.
 */
static int
iwn_apm_init(struct iwn_softc *sc)
{
	uint32_t reg;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Disable L0s exit timer (NMI bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
	/* Don't wait for ICH L0s (ICH bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);

	/* Set FH wait threshold to max (HW bug under stress workaround). */
	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);

	/* Enable HAP INTA to move adapter from L1a to L0s. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);

	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & 0x02)	/* L1 Entry enabled. */
		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
	else
		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);

	/* Older (pre-6000) non-4965 parts need the analog PLL kicked. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Enable DMA and BSM (Bootstrap State Machine). */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
	} else {
		/* Enable DMA.
 */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
	}
	DELAY(20);
	/* Disable L1-Active. */
	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Ask the busmaster DMA engine to stop and poll (up to ~1ms) until the
 * hardware reports it disabled.
 */
static void
iwn_apm_stop_master(struct iwn_softc *sc)
{
	int ntries;

	/* Stop busmaster DMA activity. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
	for (ntries = 0; ntries < 100; ntries++) {
		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
			return;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}

/*
 * Power the adapter down: stop DMA, issue a software reset and withdraw
 * the "initialization complete" request.
 */
static void
iwn_apm_stop(struct iwn_softc *sc)
{
	iwn_apm_stop_master(sc);

	/* Reset the entire device. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
	DELAY(10);
	/* Clear "initialization complete" bit. */
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
}

/*
 * 4965-specific hardware configuration: latch the radio configuration
 * bits into IWN_HW_IF_CONFIG.
 */
static int
iwn4965_nic_config(struct iwn_softc *sc)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
		/*
		 * I don't believe this to be correct but this is what the
		 * vendor driver is doing. Probably the bits should not be
		 * shifted in IWN_RFCFG_*.
 */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
	return 0;
}

/*
 * 5000-series hardware configuration: latch the radio configuration bits
 * and apply chip-specific workarounds (1000-series voltage regulator
 * selection, internal PA, calibration-version indications).
 */
static int
iwn5000_nic_config(struct iwn_softc *sc)
{
	uint32_t tmp;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);

	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/*
		 * Select first Switching Voltage Regulator (1.32V) to
		 * solve a stability issue related to noisy DC2DC line
		 * in the silicon of 1000 Series.
		 */
		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
	}
	iwn_nic_unlock(sc);

	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
		/* Use internal power amplifier only. */
		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
	}
	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
	    sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
		/* Indicate that ROM calibration version is >=6. */
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
	}
	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
	return 0;
}

/*
 * Take NIC ownership over Intel Active Management Technology (AMT).
 */
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/*
 * Bring the hardware all the way up: power it on, program the RX/TX rings
 * and DMA channels, upload the firmware and finish with the
 * adapter-specific post-alive configuration.
 */
static int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring.
 */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	/* Initial write pointer, rounded down to a multiple of 8. */
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits. */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing.
 */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}
	/* Do post-firmware initialization. */

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ops->post_alive(sc);
}

/*
 * Quiesce and power off the hardware: mask interrupts, stop the TX
 * scheduler and DMA engines, drain the rings and shut the adapter down.
 */
static void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels.
*/ 7188 if (iwn_nic_lock(sc) == 0) { 7189 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 7190 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 7191 for (ntries = 0; ntries < 200; ntries++) { 7192 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 7193 IWN_FH_TX_STATUS_IDLE(chnl)) 7194 break; 7195 DELAY(10); 7196 } 7197 } 7198 iwn_nic_unlock(sc); 7199 } 7200 7201 /* Stop RX ring. */ 7202 iwn_reset_rx_ring(sc, &sc->rxq); 7203 7204 /* Reset all TX rings. */ 7205 for (qid = 0; qid < sc->ntxqs; qid++) 7206 iwn_reset_tx_ring(sc, &sc->txq[qid]); 7207 7208 if (iwn_nic_lock(sc) == 0) { 7209 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 7210 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 7211 iwn_nic_unlock(sc); 7212 } 7213 DELAY(5); 7214 /* Power OFF adapter. */ 7215 iwn_apm_stop(sc); 7216 } 7217 7218 static void 7219 iwn_radio_on(void *arg0, int pending) 7220 { 7221 struct iwn_softc *sc = arg0; 7222 struct ifnet *ifp = sc->sc_ifp; 7223 struct ieee80211com *ic = ifp->if_l2com; 7224 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 7225 7226 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7227 7228 if (vap != NULL) { 7229 iwn_init(sc); 7230 ieee80211_init(vap); 7231 } 7232 } 7233 7234 static void 7235 iwn_radio_off(void *arg0, int pending) 7236 { 7237 struct iwn_softc *sc = arg0; 7238 struct ifnet *ifp = sc->sc_ifp; 7239 struct ieee80211com *ic = ifp->if_l2com; 7240 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 7241 7242 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7243 7244 iwn_stop(sc); 7245 if (vap != NULL) 7246 ieee80211_stop(vap); 7247 7248 /* Enable interrupts to get RF toggle notification. 
*/ 7249 IWN_LOCK(sc); 7250 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7251 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 7252 IWN_UNLOCK(sc); 7253 } 7254 7255 static void 7256 iwn_init_locked(struct iwn_softc *sc) 7257 { 7258 struct ifnet *ifp = sc->sc_ifp; 7259 int error; 7260 7261 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 7262 7263 IWN_LOCK_ASSERT(sc); 7264 7265 if ((error = iwn_hw_prepare(sc)) != 0) { 7266 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 7267 __func__, error); 7268 goto fail; 7269 } 7270 7271 /* Initialize interrupt mask to default value. */ 7272 sc->int_mask = IWN_INT_MASK_DEF; 7273 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 7274 7275 /* Check that the radio is not disabled by hardware switch. */ 7276 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 7277 device_printf(sc->sc_dev, 7278 "radio is disabled by hardware switch\n"); 7279 /* Enable interrupts to get RF toggle notifications. */ 7280 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7281 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 7282 return; 7283 } 7284 7285 /* Read firmware images from the filesystem. */ 7286 if ((error = iwn_read_firmware(sc)) != 0) { 7287 device_printf(sc->sc_dev, 7288 "%s: could not read firmware, error %d\n", __func__, 7289 error); 7290 goto fail; 7291 } 7292 7293 /* Initialize hardware and upload firmware. */ 7294 error = iwn_hw_init(sc); 7295 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7296 sc->fw_fp = NULL; 7297 if (error != 0) { 7298 device_printf(sc->sc_dev, 7299 "%s: could not initialize hardware, error %d\n", __func__, 7300 error); 7301 goto fail; 7302 } 7303 7304 /* Configure adapter now that it is ready. 
*/ 7305 if ((error = iwn_config(sc)) != 0) { 7306 device_printf(sc->sc_dev, 7307 "%s: could not configure device, error %d\n", __func__, 7308 error); 7309 goto fail; 7310 } 7311 7312 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 7313 ifp->if_drv_flags |= IFF_DRV_RUNNING; 7314 7315 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 7316 7317 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 7318 7319 return; 7320 7321 fail: iwn_stop_locked(sc); 7322 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 7323 } 7324 7325 static void 7326 iwn_init(void *arg) 7327 { 7328 struct iwn_softc *sc = arg; 7329 struct ifnet *ifp = sc->sc_ifp; 7330 struct ieee80211com *ic = ifp->if_l2com; 7331 7332 IWN_LOCK(sc); 7333 iwn_init_locked(sc); 7334 IWN_UNLOCK(sc); 7335 7336 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 7337 ieee80211_start_all(ic); 7338 } 7339 7340 static void 7341 iwn_stop_locked(struct iwn_softc *sc) 7342 { 7343 struct ifnet *ifp = sc->sc_ifp; 7344 7345 IWN_LOCK_ASSERT(sc); 7346 7347 sc->sc_tx_timer = 0; 7348 callout_stop(&sc->watchdog_to); 7349 callout_stop(&sc->calib_to); 7350 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 7351 7352 /* Power OFF hardware. */ 7353 iwn_hw_stop(sc); 7354 } 7355 7356 static void 7357 iwn_stop(struct iwn_softc *sc) 7358 { 7359 IWN_LOCK(sc); 7360 iwn_stop_locked(sc); 7361 IWN_UNLOCK(sc); 7362 } 7363 7364 /* 7365 * Callback from net80211 to start a scan. 7366 */ 7367 static void 7368 iwn_scan_start(struct ieee80211com *ic) 7369 { 7370 struct ifnet *ifp = ic->ic_ifp; 7371 struct iwn_softc *sc = ifp->if_softc; 7372 7373 IWN_LOCK(sc); 7374 /* make the link LED blink while we're scanning */ 7375 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 7376 IWN_UNLOCK(sc); 7377 } 7378 7379 /* 7380 * Callback from net80211 to terminate a scan. 
7381 */ 7382 static void 7383 iwn_scan_end(struct ieee80211com *ic) 7384 { 7385 struct ifnet *ifp = ic->ic_ifp; 7386 struct iwn_softc *sc = ifp->if_softc; 7387 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 7388 7389 IWN_LOCK(sc); 7390 if (vap->iv_state == IEEE80211_S_RUN) { 7391 /* Set link LED to ON status if we are associated */ 7392 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 7393 } 7394 IWN_UNLOCK(sc); 7395 } 7396 7397 /* 7398 * Callback from net80211 to force a channel change. 7399 */ 7400 static void 7401 iwn_set_channel(struct ieee80211com *ic) 7402 { 7403 const struct ieee80211_channel *c = ic->ic_curchan; 7404 struct ifnet *ifp = ic->ic_ifp; 7405 struct iwn_softc *sc = ifp->if_softc; 7406 int error; 7407 7408 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7409 7410 IWN_LOCK(sc); 7411 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 7412 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 7413 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 7414 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 7415 7416 /* 7417 * Only need to set the channel in Monitor mode. AP scanning and auth 7418 * are already taken care of by their respective firmware commands. 7419 */ 7420 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 7421 error = iwn_config(sc); 7422 if (error != 0) 7423 device_printf(sc->sc_dev, 7424 "%s: error %d settting channel\n", __func__, error); 7425 } 7426 IWN_UNLOCK(sc); 7427 } 7428 7429 /* 7430 * Callback from net80211 to start scanning of the current channel. 7431 */ 7432 static void 7433 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 7434 { 7435 struct ieee80211vap *vap = ss->ss_vap; 7436 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 7437 int error; 7438 7439 IWN_LOCK(sc); 7440 error = iwn_scan(sc); 7441 IWN_UNLOCK(sc); 7442 if (error != 0) 7443 ieee80211_cancel_scan(vap); 7444 } 7445 7446 /* 7447 * Callback from net80211 to handle the minimum dwell time being met. 
7448 * The intent is to terminate the scan but we just let the firmware 7449 * notify us when it's finished as we have no safe way to abort it. 7450 */ 7451 static void 7452 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 7453 { 7454 /* NB: don't try to abort scan; wait for firmware to finish */ 7455 } 7456 7457 static void 7458 iwn_hw_reset(void *arg0, int pending) 7459 { 7460 struct iwn_softc *sc = arg0; 7461 struct ifnet *ifp = sc->sc_ifp; 7462 struct ieee80211com *ic = ifp->if_l2com; 7463 7464 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7465 7466 iwn_stop(sc); 7467 iwn_init(sc); 7468 ieee80211_notify_radio(ic, 1); 7469 } 7470