/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)	((t)((m)->m_data + (off)))

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

const struct iwm_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
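
/*
 * Note: the "rate" column above is in 500 kbit/s units, matching
 * net80211's rate encoding; e.g. { 2, ... } is 1 Mbit/s CCK and
 * { 12, ... } is 6 Mbit/s OFDM.  Entries whose legacy PLCP is
 * IWM_RATE_INVM_PLCP exist only as HT (MCS) rates, and entries whose
 * HT PLCP is IWM_RATE_HT_SISO_MCS_INV_PLCP have no HT equivalent.
 */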

/* Convert an MCS index into an iwm_rates[] index. */
const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
					   enum iwm_ucode_type,
					   const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
				  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
				   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
					  enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
				  struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
			    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
				     struct iwm_rx_packet *,
				     struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
				 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
	iwm_node_alloc(struct ieee80211vap *,
		       const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
	iwm_vap_create(struct ieee80211com *,
		       const char [IFNAMSIZ], int,
		       enum ieee80211_opmode, int,
		       const uint8_t [IEEE80211_ADDR_LEN],
		       const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* the first 32 bits are the device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* the rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}
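
/*
 * The next two helpers record which optional firmware APIs and
 * capabilities an image advertises.  Each TLV carries an index that
 * selects a 32-bit word of flags; bit i of word api_index denotes
 * feature (32 * api_index + i).  For example, api_index = 1 with
 * bit 3 set marks feature 35 in the enabled_api/enabled_capa bitmaps.
 */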
static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		/* don't fall through with error == 0 and report success */
		error = ENOENT;
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
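
	/*
	 * The firmware file is a TLV container: a struct
	 * iwm_tlv_ucode_header, followed by a stream of (type, length,
	 * data) records.  Each record's payload is padded so that the
	 * next record starts on a 4-byte boundary, which is what the
	 * roundup2(tlv_len, 4) in the loop below accounts for.
	 */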

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
				    "%s: Paging: driver supports up to %u bytes for paging image\n",
				    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */
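
/*
 * The allocators below all lean on iwm_dma_contig_alloc(); as used
 * here it hands back a physically contiguous, DMA-visible buffer of
 * the requested size and alignment, recorded in a struct iwm_dma_info
 * for later teardown via iwm_dma_contig_free().
 */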

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
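
/*
 * RX ring layout: the "free" descriptor ring tells the device where
 * empty receive buffers live (64-bit descriptors on MQ-capable
 * devices, 32-bit on legacy ones), the status area is where the
 * device reports its current ring index, and MQ devices additionally
 * get a "used" descriptor ring for buffers the hardware has filled.
 */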
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
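
/*
 * Each TX ring pairs IWM_TX_RING_COUNT transmit frame descriptors
 * (TFDs) with an array of device command buffers; the cmd_paddr and
 * scratch_paddr values computed below are physical addresses into
 * that command array, precomputed once so per-packet setup never has
 * to translate them again.
 */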
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate command space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
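
/*
 * The ICT (interrupt cause table) is a DMA table the device writes
 * interrupt cause words into; once enabled, the interrupt handler can
 * read pending causes from host memory instead of doing a register
 * read across the bus for every interrupt.
 */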
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
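
/*
 * RX DMA init comes in two flavours: MQ-capable devices (e.g. the
 * 9000 family) use the RFH engine with separate free/used buffer
 * descriptor rings, configured via PRPH registers below, while older
 * devices use the single-queue legacy FH path further down.
 */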
static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (sc->cfg->mqrx_supported)
		return iwm_nic_rx_mq_init(sc);
	else
		return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
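
/*
 * iwm_enable_txq() has two paths: the command queue is configured
 * directly through scheduler (SCD) PRPH registers, since no firmware
 * command channel exists yet at that point, while regular station
 * queues are set up by sending an IWM_SCD_QUEUE_CFG command to the
 * firmware.
 */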
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	int qmsk;

	qmsk = 1 << qid;

	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
		    __func__, qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the scheduler. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

		/* Stop the TX queue prior to configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Disable aggregations for this queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);

		/* Enable the scheduler for this queue. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE	1
#define IWM_NVM_READ_OPCODE	0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
    uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver has already
			 * read valid data from another chunk, so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
			    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Copy the NVM data we just read into the caller's buffer. */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
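/*
 * A worked example of the chunking above (section size illustrative):
 * reading a 5000-byte section proceeds in IWM_NVM_DEFAULT_CHUNK_SIZE
 * (2048-byte) chunks at offsets 0, 2048 and 4096; the last chunk
 * returns only 904 bytes, and that short read terminates the loop in
 * iwm_nvm_read_section() below.
 */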
/*
 * Reads an NVM section completely.
 * NICs prior to 7000 family don't have a real NVM, but just read
 * section 0 which is the EEPROM.  Because EEPROM reads are not bounded
 * by the uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as we can,
 * without overflowing, so no check is needed.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
    uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
{
	uint16_t seglen, length, offset = 0;
	int ret;

	/* Set nvm section read length */
	length = IWM_NVM_DEFAULT_CHUNK_SIZE;

	seglen = length;

	/* Read the NVM until exhausted (reading less than requested) */
	while (seglen == length) {
		/* Check no memory assumptions fail and cause an overflow */
		if ((size_read + offset + length) >
		    sc->cfg->eeprom_size) {
			device_printf(sc->sc_dev,
			    "EEPROM size is too small for NVM\n");
			return ENOBUFS;
		}

		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
		if (ret) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "Cannot read NVM from section %d offset %d, length %d\n",
			    section, offset, length);
			return ret;
		}
		offset += seglen;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
	    "NVM section %d read completed\n", section);
	*len = offset;
	return 0;
}

/*
 * BEGIN IWM_NVM_PARSE
 */

/* iwlwifi/iwl-nvm-parse.c */

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwm_eeprom_channel_flags(uint16_t ch_flags)
{
	uint32_t nflags;

	nflags = 0;
	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* Just in case. */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return (nflags);
}
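/*
 * For example, an NVM channel entry with IWM_NVM_CHANNEL_RADAR set but
 * IWM_NVM_CHANNEL_ACTIVE and IWM_NVM_CHANNEL_IBSS clear translates to
 * IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC | IEEE80211_CHAN_DFS,
 * i.e. a DFS channel that may only be scanned passively and never used
 * for adhoc.
 */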
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}

static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[IEEE80211_MODE_BYTES];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}

static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
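/*
 * Worked example for the WFMP path above: because of the htole32()
 * wrapping, byte k of mac_addrN is bits 8k..8k+7 of the register value
 * on any host endianness.  So if IWM_WFMP_MAC_ADDR_0 reads 0xaabbccdd
 * and IWM_WFMP_MAC_ADDR_1 reads 0x0000eeff, the resulting MAC address
 * is aa:bb:cc:dd:ee:ff.
 */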
static int
iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
    const uint16_t *phy_sku)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_SKU);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
}

static int
iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
	else
		return le32_to_cpup((const uint32_t *)(nvm_sw +
		    IWM_NVM_VERSION_8000));
}

static int
iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
    const uint16_t *phy_sku)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
}

static int
iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
{
	int n_hw_addr;

	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));

	return n_hw_addr & IWM_N_HW_ADDR_MASK;
}

static void
iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
    uint32_t radio_cfg)
{
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
}

static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
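/*
 * To illustrate the "214365" comment above: the NVM stores the address
 * as three little-endian 16-bit words, so stored bytes b1 b2 b3 b4 b5 b6
 * yield the MAC address b2:b1:b4:b3:b6:b5.  E.g. bytes 11 00 33 22 55 44
 * in the NVM produce 00:11:22:33:44:55.
 */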
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
    const uint16_t *nvm_calib, const uint16_t *mac_override,
    const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	} else {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	}
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
		/* TODO: use IWL_NVM_EXT */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
		    IWM_NVM_LAR_OFFSET_8000_OLD :
		    IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
		    IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		free(data, M_DEVBUF);
		return NULL;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
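/*
 * Note on the allocation above: nvm_ch_flags is a variable-length
 * trailing array (as in the iwlwifi layout this mirrors), so a single
 * malloc() of sizeof(*data) plus IWM_NUM_CHANNELS(_8000) uint16_t
 * slots covers both the fixed fields and the per-channel flag words
 * copied in at the end of this function.
 */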
static void
iwm_free_nvm_data(struct iwm_nvm_data *data)
{
	if (data != NULL)
		free(data, M_DEVBUF);
}

static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}

static int
iwm_nvm_init(struct iwm_softc *sc)
{
	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
	int i, ret, section;
	uint32_t size_read = 0;
	uint8_t *nvm_buffer, *temp;
	uint16_t len;

	memset(nvm_sections, 0, sizeof(nvm_sections));

	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
		return EINVAL;

	/* load NVM values from nic */
	/* Read From FW NVM */
	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");

	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!nvm_buffer)
		return ENOMEM;
	for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
		/* we override the constness for initial read */
		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
		    &len, size_read);
		if (ret)
			continue;
		size_read += len;
		temp = malloc(len, M_DEVBUF, M_NOWAIT);
		if (!temp) {
			ret = ENOMEM;
			break;
		}
		memcpy(temp, nvm_buffer, len);

		nvm_sections[section].data = temp;
		nvm_sections[section].length = len;
	}
	if (!size_read)
		device_printf(sc->sc_dev, "OTP is blank\n");
	free(nvm_buffer, M_DEVBUF);

	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
	if (!sc->nvm_data)
		return EINVAL;
	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
	    "nvm version = %x\n", sc->nvm_data->nvm_version);

	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
		if (nvm_sections[i].data != NULL)
			free(nvm_sections[i].data, M_DEVBUF);
	}

	return 0;
}

static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
    const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: [%d] uCode section being loaded...\n",
	    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
		    copy_size);

		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
/*
 * ucode
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    bus_addr_t phy_addr, uint32_t byte_cnt)
{
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);

	if (!sc->sc_fw_chunk_done) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
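/*
 * In the family-8000 loader below, sec_num walks through 0x1, 0x3,
 * 0x7, ... (sec_num = (sec_num << 1) | 1), so after n sections have
 * been loaded, the IWM_FH_UCODE_LOAD_STATUS half-word for that CPU
 * holds a mask of n one-bits; the final write of 0xFFFF (CPU1) or
 * 0xFFFFFFFF (CPU2) marks the load for that CPU as complete.
 */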
2501 */ 2502 if (!image->sec[i].data || 2503 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2504 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2505 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2506 "Break since Data not valid or Empty section, sec = %d\n", 2507 i); 2508 break; 2509 } 2510 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2511 if (ret) 2512 return ret; 2513 2514 /* Notify the ucode of the loaded section number and status */ 2515 if (iwm_nic_lock(sc)) { 2516 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS); 2517 val = val | (sec_num << shift_param); 2518 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val); 2519 sec_num = (sec_num << 1) | 0x1; 2520 iwm_nic_unlock(sc); 2521 } 2522 } 2523 2524 *first_ucode_section = last_read_idx; 2525 2526 iwm_enable_interrupts(sc); 2527 2528 if (iwm_nic_lock(sc)) { 2529 if (cpu == 1) 2530 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF); 2531 else 2532 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); 2533 iwm_nic_unlock(sc); 2534 } 2535 2536 return 0; 2537 } 2538 2539 static int 2540 iwm_pcie_load_cpu_sections(struct iwm_softc *sc, 2541 const struct iwm_fw_img *image, int cpu, int *first_ucode_section) 2542 { 2543 int i, ret = 0; 2544 uint32_t last_read_idx = 0; 2545 2546 if (cpu == 1) { 2547 *first_ucode_section = 0; 2548 } else { 2549 (*first_ucode_section)++; 2550 } 2551 2552 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2553 last_read_idx = i; 2554 2555 /* 2556 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2557 * CPU1 to CPU2. 2558 * PAGING_SEPARATOR_SECTION delimiter - separate between 2559 * CPU2 non paged to CPU2 paging sec. 2560 */ 2561 if (!image->sec[i].data || 2562 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2563 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2564 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2565 "Break since Data not valid or Empty section, sec = %d\n", 2566 i); 2567 break; 2568 } 2569 2570 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2571 if (ret) 2572 return ret; 2573 } 2574 2575 *first_ucode_section = last_read_idx; 2576 2577 return 0; 2578 2579 } 2580 2581 static int 2582 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image) 2583 { 2584 int ret = 0; 2585 int first_ucode_section; 2586 2587 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2588 image->is_dual_cpus ? "Dual" : "Single"); 2589 2590 /* load to FW the binary non secured sections of CPU1 */ 2591 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section); 2592 if (ret) 2593 return ret; 2594 2595 if (image->is_dual_cpus) { 2596 /* set CPU2 header address */ 2597 if (iwm_nic_lock(sc)) { 2598 iwm_write_prph(sc, 2599 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, 2600 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE); 2601 iwm_nic_unlock(sc); 2602 } 2603 2604 /* load to FW the binary sections of CPU2 */ 2605 ret = iwm_pcie_load_cpu_sections(sc, image, 2, 2606 &first_ucode_section); 2607 if (ret) 2608 return ret; 2609 } 2610 2611 iwm_enable_interrupts(sc); 2612 2613 /* release CPU reset */ 2614 IWM_WRITE(sc, IWM_CSR_RESET, 0); 2615 2616 return 0; 2617 } 2618 2619 int 2620 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc, 2621 const struct iwm_fw_img *image) 2622 { 2623 int ret = 0; 2624 int first_ucode_section; 2625 2626 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2627 image->is_dual_cpus ? 
"Dual" : "Single"); 2628 2629 /* configure the ucode to be ready to get the secured image */ 2630 /* release CPU reset */ 2631 if (iwm_nic_lock(sc)) { 2632 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, 2633 IWM_RELEASE_CPU_RESET_BIT); 2634 iwm_nic_unlock(sc); 2635 } 2636 2637 /* load to FW the binary Secured sections of CPU1 */ 2638 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1, 2639 &first_ucode_section); 2640 if (ret) 2641 return ret; 2642 2643 /* load to FW the binary sections of CPU2 */ 2644 return iwm_pcie_load_cpu_sections_8000(sc, image, 2, 2645 &first_ucode_section); 2646 } 2647 2648 /* XXX Get rid of this definition */ 2649 static inline void 2650 iwm_enable_fw_load_int(struct iwm_softc *sc) 2651 { 2652 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n"); 2653 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX; 2654 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 2655 } 2656 2657 /* XXX Add proper rfkill support code */ 2658 static int 2659 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw) 2660 { 2661 int ret; 2662 2663 /* This may fail if AMT took ownership of the device */ 2664 if (iwm_prepare_card_hw(sc)) { 2665 device_printf(sc->sc_dev, 2666 "%s: Exit HW not ready\n", __func__); 2667 ret = EIO; 2668 goto out; 2669 } 2670 2671 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2672 2673 iwm_disable_interrupts(sc); 2674 2675 /* make sure rfkill handshake bits are cleared */ 2676 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2677 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, 2678 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2679 2680 /* clear (again), then enable host interrupts */ 2681 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2682 2683 ret = iwm_nic_init(sc); 2684 if (ret) { 2685 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__); 2686 goto out; 2687 } 2688 2689 /* 2690 * Now, we load the firmware and don't want to be interrupted, even 2691 * by the RF-Kill interrupt (hence mask all the interrupt besides the 2692 * FH_TX interrupt which is needed to load the firmware). If the 2693 * RF-Kill switch is toggled, we will find out after having loaded 2694 * the firmware and return the proper value to the caller. 2695 */ 2696 iwm_enable_fw_load_int(sc); 2697 2698 /* really make sure rfkill handshake bits are cleared */ 2699 /* maybe we should write a few times more? 
/* XXX Get rid of this definition */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

/* XXX Add proper rfkill support code */
static int
iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides the
	 * FH_TX interrupt which is needed to load the firmware).  If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}

static int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = htole32(valid_tx_ant),
	};

	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}

/* iwlwifi: mvm/fw.c */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_alive_data *alive_data = data;
	struct iwm_alive_resp_v3 *palive3;
	struct iwm_alive_resp *palive;
	struct iwm_umac_alive *umac;
	struct iwm_lmac_alive *lmac1;
	struct iwm_lmac_alive *lmac2 = NULL;
	uint16_t status;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16toh(palive->status);
	} else {
		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16toh(palive3->status);
	}

	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
	if (lmac2)
		sc->error_event_table[1] =
		    le32toh(lmac2->error_event_table_ptr);
	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
	sc->umac_error_event_table = le32toh(umac->error_info_addr);
	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
	if (sc->umac_error_event_table)
		sc->support_umac_log = TRUE;

	IWM_DPRINTF(sc, IWM_DEBUG_FW,
	    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
	    status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");

	IWM_DPRINTF(sc, IWM_DEBUG_FW,
	    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
	    le32toh(umac->umac_major),
	    le32toh(umac->umac_minor));

	return TRUE;
}
static int
iwm_wait_phy_db_entry(struct iwm_softc *sc,
    struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_phy_db *phy_db = data;

	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
			    __func__, pkt->hdr.code);
		}
		return TRUE;
	}

	if (iwm_phy_db_set_section(phy_db, pkt)) {
		device_printf(sc->sc_dev,
		    "%s: iwm_phy_db_set_section failed\n", __func__);
	}

	return FALSE;
}

static int
iwm_load_ucode_wait_alive(struct iwm_softc *sc,
    enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_alive_data alive_data;
	const struct iwm_fw_img *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_ALIVE };

	fw = &sc->sc_fw.img[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
	    alive_cmd, nitems(alive_cmd),
	    iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
	    IWM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * Configure and operate the firmware paging mechanism.
	 * The driver configures the paging flow only once; the CPU2
	 * paging image is included in the IWM_UCODE_INIT image.
	 */
2883 */ 2884 if (fw->paging_mem_size) { 2885 error = iwm_save_fw_paging(sc, fw); 2886 if (error) { 2887 device_printf(sc->sc_dev, 2888 "%s: failed to save the FW paging image\n", 2889 __func__); 2890 return error; 2891 } 2892 2893 error = iwm_send_paging_cmd(sc, fw); 2894 if (error) { 2895 device_printf(sc->sc_dev, 2896 "%s: failed to send the paging cmd\n", __func__); 2897 iwm_free_fw_paging(sc); 2898 return error; 2899 } 2900 } 2901 2902 if (!error) 2903 sc->ucode_loaded = TRUE; 2904 return error; 2905 } 2906 2907 /* 2908 * mvm misc bits 2909 */ 2910 2911 /* 2912 * follows iwlwifi/fw.c 2913 */ 2914 static int 2915 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm) 2916 { 2917 struct iwm_notification_wait calib_wait; 2918 static const uint16_t init_complete[] = { 2919 IWM_INIT_COMPLETE_NOTIF, 2920 IWM_CALIB_RES_NOTIF_PHY_DB 2921 }; 2922 int ret; 2923 2924 /* do not operate with rfkill switch turned on */ 2925 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) { 2926 device_printf(sc->sc_dev, 2927 "radio is disabled by hardware switch\n"); 2928 return EPERM; 2929 } 2930 2931 iwm_init_notification_wait(sc->sc_notif_wait, 2932 &calib_wait, 2933 init_complete, 2934 nitems(init_complete), 2935 iwm_wait_phy_db_entry, 2936 sc->sc_phy_db); 2937 2938 /* Will also start the device */ 2939 ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT); 2940 if (ret) { 2941 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n", 2942 ret); 2943 goto error; 2944 } 2945 2946 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) { 2947 ret = iwm_send_bt_init_conf(sc); 2948 if (ret) { 2949 device_printf(sc->sc_dev, 2950 "failed to send bt coex configuration: %d\n", ret); 2951 goto error; 2952 } 2953 } 2954 2955 if (justnvm) { 2956 /* Read nvm */ 2957 ret = iwm_nvm_init(sc); 2958 if (ret) { 2959 device_printf(sc->sc_dev, "failed to read nvm\n"); 2960 goto error; 2961 } 2962 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr); 2963 goto error; 2964 } 2965 2966 /* Send TX valid antennas before triggering calibrations */ 2967 ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc)); 2968 if (ret) { 2969 device_printf(sc->sc_dev, 2970 "failed to send antennas before calibration: %d\n", ret); 2971 goto error; 2972 } 2973 2974 /* 2975 * Send phy configurations command to init uCode 2976 * to start the 16.0 uCode init image internal calibrations. 2977 */ 2978 ret = iwm_send_phy_cfg_cmd(sc); 2979 if (ret) { 2980 device_printf(sc->sc_dev, 2981 "%s: Failed to run INIT calibrations: %d\n", 2982 __func__, ret); 2983 goto error; 2984 } 2985 2986 /* 2987 * Nothing to do but wait for the init complete notification 2988 * from the firmware. 
2989 */ 2990 IWM_UNLOCK(sc); 2991 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait, 2992 IWM_UCODE_CALIB_TIMEOUT); 2993 IWM_LOCK(sc); 2994 2995 2996 goto out; 2997 2998 error: 2999 iwm_remove_notification(sc->sc_notif_wait, &calib_wait); 3000 out: 3001 return ret; 3002 } 3003 3004 static int 3005 iwm_config_ltr(struct iwm_softc *sc) 3006 { 3007 struct iwm_ltr_config_cmd cmd = { 3008 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE), 3009 }; 3010 3011 if (!sc->sc_ltr_enabled) 3012 return 0; 3013 3014 return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd); 3015 } 3016 3017 /* 3018 * receive side 3019 */ 3020 3021 /* (re)stock rx ring, called at init-time and at runtime */ 3022 static int 3023 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx) 3024 { 3025 struct iwm_rx_ring *ring = &sc->rxq; 3026 struct iwm_rx_data *data = &ring->data[idx]; 3027 struct mbuf *m; 3028 bus_dmamap_t dmamap; 3029 bus_dma_segment_t seg; 3030 int nsegs, error; 3031 3032 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE); 3033 if (m == NULL) 3034 return ENOBUFS; 3035 3036 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3037 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m, 3038 &seg, &nsegs, BUS_DMA_NOWAIT); 3039 if (error != 0) { 3040 device_printf(sc->sc_dev, 3041 "%s: can't map mbuf, error %d\n", __func__, error); 3042 m_freem(m); 3043 return error; 3044 } 3045 3046 if (data->m != NULL) 3047 bus_dmamap_unload(ring->data_dmat, data->map); 3048 3049 /* Swap ring->spare_map with data->map */ 3050 dmamap = data->map; 3051 data->map = ring->spare_map; 3052 ring->spare_map = dmamap; 3053 3054 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 3055 data->m = m; 3056 3057 /* Update RX descriptor. */ 3058 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned")); 3059 if (sc->cfg->mqrx_supported) 3060 ((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr); 3061 else 3062 ((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8); 3063 bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map, 3064 BUS_DMASYNC_PREWRITE); 3065 3066 return 0; 3067 } 3068 3069 static void 3070 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3071 { 3072 struct iwm_rx_phy_info *phy_info = (void *)pkt->data; 3073 3074 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n"); 3075 3076 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); 3077 } 3078 3079 /* 3080 * Retrieve the average noise (in dBm) among receivers. 3081 */ 3082 static int 3083 iwm_get_noise(struct iwm_softc *sc, 3084 const struct iwm_statistics_rx_non_phy *stats) 3085 { 3086 int i, noise; 3087 #ifdef IWM_DEBUG 3088 int nbant, total; 3089 #else 3090 int nbant __unused, total __unused; 3091 #endif 3092 3093 total = nbant = noise = 0; 3094 for (i = 0; i < 3; i++) { 3095 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff; 3096 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n", 3097 __func__, 3098 i, 3099 noise); 3100 3101 if (noise) { 3102 total += noise; 3103 nbant++; 3104 } 3105 } 3106 3107 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n", 3108 __func__, nbant, total); 3109 #if 0 3110 /* There should be at least one antenna but check anyway. */ 3111 return (nbant == 0) ? 
static void
iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwm_get_noise(struct iwm_softc *sc,
    const struct iwm_statistics_rx_non_phy *stats)
{
	int i, noise;
#ifdef IWM_DEBUG
	int nbant, total;
#else
	int nbant __unused, total __unused;
#endif

	total = nbant = noise = 0;
	for (i = 0; i < 3; i++) {
		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
		    __func__,
		    i,
		    noise);

		if (noise) {
			total += noise;
			nbant++;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
	    __func__, nbant, total);
#if 0
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
#else
	/* For now, just hard-code it to -96 to be safe */
	return (-96);
#endif
}
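/*
 * Example of the disabled averaging path above: silence RSSI readings
 * of 40, 42 and 0 (third antenna missing) give total = 82 over
 * nbant = 2 antennas, i.e. (82 / 2) - 107 = -66 dBm; with no antennas
 * reporting, it would fall back to -127 dBm.
 */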
static void
iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_notif_statistics *stats = (void *)&pkt->data;

	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
}

/* iwlwifi: mvm/rx.c */
/*
 * iwm_rx_get_signal_strength - use new rx PHY INFO API
 * values are reported by the fw as positive values - need to negate
 * to obtain their dBm.  Account for missing antennas by replacing 0
 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
 */
static int
iwm_rx_get_signal_strength(struct iwm_softc *sc,
    struct iwm_rx_phy_info *phy_info)
{
	int energy_a, energy_b, energy_c, max_energy;
	uint32_t val;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_A_POS;
	energy_a = energy_a ? -energy_a : -256;
	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_B_POS;
	energy_b = energy_b ? -energy_b : -256;
	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_C_POS;
	energy_c = energy_c ? -energy_c : -256;
	max_energy = MAX(energy_a, energy_b);
	max_energy = MAX(max_energy, energy_c);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "energy In A %d B %d C %d , and max %d\n",
	    energy_a, energy_b, energy_c, max_energy);

	return max_energy;
}

static int
iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
    struct iwm_rx_mpdu_desc *desc)
{
	int energy_a, energy_b;

	energy_a = desc->v1.energy_a;
	energy_b = desc->v1.energy_b;
	energy_a = energy_a ? -energy_a : -256;
	energy_b = energy_b ? -energy_b : -256;
	return MAX(energy_a, energy_b);
}
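/*
 * For instance, a firmware energy report of 50 on antenna A and 0 on
 * antenna B becomes energy_a = -50 dBm and energy_b = -256 dBm in the
 * helpers above, so a missing antenna can never win the MAX() and the
 * reported signal strength is -50 dBm.
 */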
/*
 * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw
 */
static bool
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
    bool stolen)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_rx_stats rxs;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	len = le16toh(rx_res->byte_count);
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		device_printf(sc->sc_dev,
		    "dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt);
		return false;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		return false;
	}

	rssi = iwm_rx_get_signal_strength(sc, phy_info);

	/* Map it to relative value */
	rssi = rssi - sc->sc_noise;

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
		    __func__);
		return false;
	}

	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: phy_info: channel=%d, flags=0x%08x\n",
	    __func__,
	    le16toh(phy_info->channel),
	    le16toh(phy_info->phy_flags));

	/*
	 * Populate an RX state struct with the provided information.
	 */
	bzero(&rxs, sizeof(rxs));
	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
	rxs.r_flags |= IEEE80211_R_BAND;
	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
	rxs.c_ieee = le16toh(phy_info->channel);
	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
		rxs.c_band = IEEE80211_CHAN_2GHZ;
	} else {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
		rxs.c_band = IEEE80211_CHAN_5GHZ;
	}

	/* rssi is in 1/2db units */
	rxs.c_rssi = rssi * 2;
	rxs.c_nf = sc->sc_noise;
	if (ieee80211_add_rx_params(m, &rxs) == 0)
		return false;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq = htole16(rxs.c_freq);
		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		switch (phy_info->rate) {
		/* CCK rates. */
		case 10: tap->wr_rate = 2; break;
		case 20: tap->wr_rate = 4; break;
		case 55: tap->wr_rate = 11; break;
		case 110: tap->wr_rate = 22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate = 12; break;
		case 0xf: tap->wr_rate = 18; break;
		case 0x5: tap->wr_rate = 24; break;
		case 0x7: tap->wr_rate = 36; break;
		case 0x9: tap->wr_rate = 48; break;
		case 0xb: tap->wr_rate = 72; break;
		case 0x1: tap->wr_rate = 96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default: tap->wr_rate = 0;
		}
	}

	return true;
}
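/*
 * Units in the rate switch above: the firmware encodes CCK rates as
 * ten times the bit rate in Mb/s (55 = 5.5 Mb/s) and OFDM rates as
 * PLCP signal values (0xd = 6 Mb/s), while radiotap's wr_rate field
 * wants units of 0.5 Mb/s; hence e.g. 55 -> 11 and 0xd -> 12.
 */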
static bool
iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
    bool stolen)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_rx_stats rxs;
	struct iwm_rx_mpdu_desc *desc;
	struct iwm_rx_packet *pkt;
	int rssi;
	uint32_t hdrlen, len, rate_n_flags;
	uint16_t phy_info;
	uint8_t channel;

	pkt = mtodo(m, offset);
	desc = (void *)pkt->data;

	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
		return false;
	}

	channel = desc->v1.channel;
	len = le16toh(desc->mpdu_len);
	phy_info = le16toh(desc->phy_info);
	rate_n_flags = desc->v1.rate_n_flags;

	wh = mtodo(m, sizeof(*desc));
	m->m_data = pkt->data + sizeof(*desc);
	m->m_pkthdr.len = m->m_len = len;

	/* Account for padding following the frame header. */
	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
		hdrlen = ieee80211_anyhdrsize(wh);
		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
		m->m_data = mtodo(m, 2);
		wh = mtod(m, struct ieee80211_frame *);
	}

	/* Map it to relative value */
	rssi = iwm_rxmq_get_signal_strength(sc, desc);
	rssi = rssi - sc->sc_noise;

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
		    __func__);
		return false;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);

	/*
	 * Populate an RX state struct with the provided information.
	 */
	bzero(&rxs, sizeof(rxs));
	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
	rxs.r_flags |= IEEE80211_R_BAND;
	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
	rxs.c_ieee = channel;
	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
	rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;

	/* rssi is in 1/2db units */
	rxs.c_rssi = rssi * 2;
	rxs.c_nf = sc->sc_noise;
	if (ieee80211_add_rx_params(m, &rxs) == 0)
		return false;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq = htole16(rxs.c_freq);
		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = desc->v1.gp2_on_air_rise;
		switch ((rate_n_flags & 0xff)) {
		/* CCK rates. */
		case 10: tap->wr_rate = 2; break;
		case 20: tap->wr_rate = 4; break;
		case 55: tap->wr_rate = 11; break;
		case 110: tap->wr_rate = 22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate = 12; break;
		case 0xf: tap->wr_rate = 18; break;
		case 0x5: tap->wr_rate = 24; break;
		case 0x7: tap->wr_rate = 36; break;
		case 0x9: tap->wr_rate = 48; break;
		case 0xb: tap->wr_rate = 72; break;
		case 0x1: tap->wr_rate = 96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default: tap->wr_rate = 0;
		}
	}

	return true;
}

static bool
iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
    bool stolen)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	bool ret;

	ic = &sc->sc_ic;

	ret = sc->cfg->mqrx_supported ?
	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
	if (!ret) {
		counter_u64_add(ic->ic_ierrors, 1);
		return (ret);
	}

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

	IWM_UNLOCK(sc);
	if (ni != NULL) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
		ieee80211_input_mimo(ni, m);
		ieee80211_free_node(ni);
	} else {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
		ieee80211_input_mimo_all(ic, m);
	}
	IWM_LOCK(sc);

	return true;
}
static int
iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_node *in)
{
	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211vap *vap = ni->ni_vap;
	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
	boolean_t rate_matched;
	uint8_t tx_resp_rate;

	KASSERT(tx_resp->frame_count == 1, ("too many frames"));

	/* Update rate control statistics. */
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
	    __func__,
	    (int) le16toh(tx_resp->status.status),
	    (int) le16toh(tx_resp->status.sequence),
	    tx_resp->frame_count,
	    tx_resp->bt_kill_count,
	    tx_resp->failure_rts,
	    tx_resp->failure_frame,
	    le32toh(tx_resp->initial_rate),
	    (int) le16toh(tx_resp->wireless_media_time));

	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));

	/* For rate control, ignore frames sent at different initial rate */
	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);

	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
	}

	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
	    IEEE80211_RATECTL_STATUS_LONG_RETRY;
	txs->short_retries = tx_resp->failure_rts;
	txs->long_retries = tx_resp->failure_frame;
	if (status != IWM_TX_STATUS_SUCCESS &&
	    status != IWM_TX_STATUS_DIRECT_DONE) {
		switch (status) {
		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
			break;
		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
			break;
		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
			break;
		default:
			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
			break;
		}
	} else {
		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
	}

	if (rate_matched) {
		ieee80211_ratectl_tx_complete(ni, txs);

		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
		new_rate = vap->iv_bss->ni_txrate;
		if (new_rate != 0 && new_rate != cur_rate) {
			struct iwm_node *in = IWM_NODE(vap->iv_bss);
			iwm_setrates(sc, in, rix);
			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
		}
	}

	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
}

static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *txd;
	struct iwm_node *in;
	struct mbuf *m;
	int idx, qid, qmsk, status;

	cmd_hdr = &pkt->hdr;
	idx = cmd_hdr->idx;
	qid = cmd_hdr->qid;

	ring = &sc->txq[qid];
	txd = &ring->data[idx];
	in = txd->in;
	m = txd->m;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	sc->sc_tx_timer = 0;

	status = iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	qmsk = 1 << qid;
	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
		sc->qfullmsk &= ~qmsk;
		if (sc->qfullmsk == 0)
			iwm_start(sc);
	}
}
/*
 * transmit side
 */

/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 * from if_iwn
 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	wakeup(&ring->desc[pkt->hdr.idx]);

	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}

#if 0
/*
 * necessary only for block ack mode
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif

static int
iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
{
	int i;

	for (i = 0; i < nitems(iwm_rates); i++) {
		if (iwm_rates[i].rate == rate)
			return (i);
	}
	/* XXX error? */
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: couldn't find an entry for rate=%d\n",
	    __func__,
	    rate);
	return (0);
}
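/*
 * Worked example for the (disabled) byte-count update in
 * iwm_update_sched() above: a frame of len 100 becomes 108 after the
 * 8-byte fudge, which rounds to 27 dwords, so with sta_id 0 the
 * scheduler entry written would be htole16(0 << 12 | 27).
 */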
3663 */ 3664 static const struct iwm_rate * 3665 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in, 3666 struct mbuf *m, struct iwm_tx_cmd *tx) 3667 { 3668 struct ieee80211_node *ni = &in->in_ni; 3669 struct ieee80211_frame *wh; 3670 const struct ieee80211_txparam *tp = ni->ni_txparms; 3671 const struct iwm_rate *rinfo; 3672 int type; 3673 int ridx, rate_flags; 3674 3675 wh = mtod(m, struct ieee80211_frame *); 3676 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3677 3678 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT; 3679 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY; 3680 3681 if (type == IEEE80211_FC0_TYPE_MGT || 3682 type == IEEE80211_FC0_TYPE_CTL || 3683 (m->m_flags & M_EAPOL) != 0) { 3684 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate); 3685 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3686 "%s: MGT (%d)\n", __func__, tp->mgmtrate); 3687 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3688 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate); 3689 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3690 "%s: MCAST (%d)\n", __func__, tp->mcastrate); 3691 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 3692 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate); 3693 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3694 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate); 3695 } else { 3696 /* for data frames, use RS table */ 3697 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__); 3698 ridx = iwm_rate2ridx(sc, ni->ni_txrate); 3699 if (ridx == -1) 3700 ridx = 0; 3701 3702 /* This is the index into the programmed table */ 3703 tx->initial_rate_index = 0; 3704 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE); 3705 } 3706 3707 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3708 "%s: frame type=%d txrate %d\n", 3709 __func__, type, iwm_rates[ridx].rate); 3710 3711 rinfo = &iwm_rates[ridx]; 3712 3713 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n", 3714 __func__, ridx, 3715 rinfo->rate, 3716 !! (IWM_RIDX_IS_CCK(ridx)) 3717 ); 3718 3719 /* XXX TODO: hard-coded TX antenna? 
*/ 3720 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000) 3721 rate_flags = IWM_RATE_MCS_ANT_B_MSK; 3722 else 3723 rate_flags = IWM_RATE_MCS_ANT_A_MSK; 3724 if (IWM_RIDX_IS_CCK(ridx)) 3725 rate_flags |= IWM_RATE_MCS_CCK_MSK; 3726 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp); 3727 3728 return rinfo; 3729 } 3730 3731 #define TB0_SIZE 16 3732 static int 3733 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 3734 { 3735 struct ieee80211com *ic = &sc->sc_ic; 3736 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3737 struct iwm_node *in = IWM_NODE(ni); 3738 struct iwm_tx_ring *ring; 3739 struct iwm_tx_data *data; 3740 struct iwm_tfd *desc; 3741 struct iwm_device_cmd *cmd; 3742 struct iwm_tx_cmd *tx; 3743 struct ieee80211_frame *wh; 3744 struct ieee80211_key *k = NULL; 3745 struct mbuf *m1; 3746 const struct iwm_rate *rinfo; 3747 uint32_t flags; 3748 u_int hdrlen; 3749 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER]; 3750 int nsegs; 3751 uint8_t tid, type; 3752 int i, totlen, error, pad; 3753 3754 wh = mtod(m, struct ieee80211_frame *); 3755 hdrlen = ieee80211_anyhdrsize(wh); 3756 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3757 tid = 0; 3758 ring = &sc->txq[ac]; 3759 desc = &ring->desc[ring->cur]; 3760 data = &ring->data[ring->cur]; 3761 3762 /* Fill out iwm_tx_cmd to send to the firmware */ 3763 cmd = &ring->cmd[ring->cur]; 3764 cmd->hdr.code = IWM_TX_CMD; 3765 cmd->hdr.flags = 0; 3766 cmd->hdr.qid = ring->qid; 3767 cmd->hdr.idx = ring->cur; 3768 3769 tx = (void *)cmd->data; 3770 memset(tx, 0, sizeof(*tx)); 3771 3772 rinfo = iwm_tx_fill_cmd(sc, in, m, tx); 3773 3774 /* Encrypt the frame if need be. */ 3775 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 3776 /* Retrieve key for TX && do software encryption. */ 3777 k = ieee80211_crypto_encap(ni, m); 3778 if (k == NULL) { 3779 m_freem(m); 3780 return (ENOBUFS); 3781 } 3782 /* 802.11 header may have moved. */ 3783 wh = mtod(m, struct ieee80211_frame *); 3784 } 3785 3786 if (ieee80211_radiotap_active_vap(vap)) { 3787 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap; 3788 3789 tap->wt_flags = 0; 3790 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 3791 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 3792 tap->wt_rate = rinfo->rate; 3793 if (k != NULL) 3794 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3795 ieee80211_radiotap_tx(vap, m); 3796 } 3797 3798 flags = 0; 3799 totlen = m->m_pkthdr.len; 3800 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3801 flags |= IWM_TX_CMD_FLG_ACK; 3802 } 3803 3804 if (type == IEEE80211_FC0_TYPE_DATA && 3805 totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold && 3806 !IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3807 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE; 3808 } 3809 3810 tx->sta_id = IWM_STATION_ID; 3811 3812 if (type == IEEE80211_FC0_TYPE_MGT) { 3813 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3814 3815 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3816 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { 3817 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC); 3818 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) { 3819 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3820 } else { 3821 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT); 3822 } 3823 } else { 3824 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3825 } 3826 3827 if (hdrlen & 3) { 3828 /* First segment length must be a multiple of 4. 
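	 * The TX command and 802.11 header share the first two TBs,
	 * so an unaligned header is padded with 4 - (hdrlen & 3)
	 * bytes and the firmware is told via IWM_TX_CMD_FLG_MH_PAD
	 * and IWM_TX_CMD_OFFLD_PAD.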
*/ 3829 flags |= IWM_TX_CMD_FLG_MH_PAD; 3830 tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD); 3831 pad = 4 - (hdrlen & 3); 3832 } else { 3833 tx->offload_assist = 0; 3834 pad = 0; 3835 } 3836 3837 tx->len = htole16(totlen); 3838 tx->tid_tspec = tid; 3839 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE); 3840 3841 /* Set physical address of "scratch area". */ 3842 tx->dram_lsb_ptr = htole32(data->scratch_paddr); 3843 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr); 3844 3845 /* Copy 802.11 header in TX command. */ 3846 memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen); 3847 3848 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL; 3849 3850 tx->sec_ctl = 0; 3851 tx->tx_flags |= htole32(flags); 3852 3853 /* Trim 802.11 header. */ 3854 m_adj(m, hdrlen); 3855 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3856 segs, &nsegs, BUS_DMA_NOWAIT); 3857 if (error != 0) { 3858 if (error != EFBIG) { 3859 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3860 error); 3861 m_freem(m); 3862 return error; 3863 } 3864 /* Too many DMA segments, linearize mbuf. */ 3865 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2); 3866 if (m1 == NULL) { 3867 device_printf(sc->sc_dev, 3868 "%s: could not defrag mbuf\n", __func__); 3869 m_freem(m); 3870 return (ENOBUFS); 3871 } 3872 m = m1; 3873 3874 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3875 segs, &nsegs, BUS_DMA_NOWAIT); 3876 if (error != 0) { 3877 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3878 error); 3879 m_freem(m); 3880 return error; 3881 } 3882 } 3883 data->m = m; 3884 data->in = in; 3885 data->done = 0; 3886 3887 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3888 "sending txd %p, in %p\n", data, data->in); 3889 KASSERT(data->in != NULL, ("node is NULL")); 3890 3891 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3892 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n", 3893 ring->qid, ring->cur, totlen, nsegs, 3894 le32toh(tx->tx_flags), 3895 le32toh(tx->rate_n_flags), 3896 tx->initial_rate_index 3897 ); 3898 3899 /* Fill TX descriptor. */ 3900 memset(desc, 0, sizeof(*desc)); 3901 desc->num_tbs = 2 + nsegs; 3902 3903 desc->tbs[0].lo = htole32(data->cmd_paddr); 3904 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) | 3905 (TB0_SIZE << 4)); 3906 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE); 3907 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) | 3908 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) + 3909 hdrlen + pad - TB0_SIZE) << 4)); 3910 3911 /* Other DMA segments are for data payload. */ 3912 for (i = 0; i < nsegs; i++) { 3913 seg = &segs[i]; 3914 desc->tbs[i + 2].lo = htole32(seg->ds_addr); 3915 desc->tbs[i + 2].hi_n_len = 3916 htole16(iwm_get_dma_hi_addr(seg->ds_addr)) | 3917 (seg->ds_len << 4); 3918 } 3919 3920 bus_dmamap_sync(ring->data_dmat, data->map, 3921 BUS_DMASYNC_PREWRITE); 3922 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 3923 BUS_DMASYNC_PREWRITE); 3924 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3925 BUS_DMASYNC_PREWRITE); 3926 3927 #if 0 3928 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len)); 3929 #endif 3930 3931 /* Kick TX ring. */ 3932 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT; 3933 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3934 3935 /* Mark TX ring as full if we reach a certain threshold. 
*/ 3936 if (++ring->queued > IWM_TX_RING_HIMARK) { 3937 sc->qfullmsk |= 1 << ring->qid; 3938 } 3939 3940 return 0; 3941 } 3942 3943 static int 3944 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3945 const struct ieee80211_bpf_params *params) 3946 { 3947 struct ieee80211com *ic = ni->ni_ic; 3948 struct iwm_softc *sc = ic->ic_softc; 3949 int error = 0; 3950 3951 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3952 "->%s begin\n", __func__); 3953 3954 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 3955 m_freem(m); 3956 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3957 "<-%s not RUNNING\n", __func__); 3958 return (ENETDOWN); 3959 } 3960 3961 IWM_LOCK(sc); 3962 /* XXX fix this */ 3963 if (params == NULL) { 3964 error = iwm_tx(sc, m, ni, 0); 3965 } else { 3966 error = iwm_tx(sc, m, ni, 0); 3967 } 3968 if (sc->sc_tx_timer == 0) 3969 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 3970 sc->sc_tx_timer = 5; 3971 IWM_UNLOCK(sc); 3972 3973 return (error); 3974 } 3975 3976 /* 3977 * mvm/tx.c 3978 */ 3979 3980 /* 3981 * Note that there are transports that buffer frames before they reach 3982 * the firmware. This means that after flush_tx_path is called, the 3983 * queue might not be empty. The race-free way to handle this is to: 3984 * 1) set the station as draining 3985 * 2) flush the Tx path 3986 * 3) wait for the transport queues to be empty 3987 */ 3988 int 3989 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags) 3990 { 3991 int ret; 3992 struct iwm_tx_path_flush_cmd_v1 flush_cmd = { 3993 .queues_ctl = htole32(tfd_msk), 3994 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH), 3995 }; 3996 3997 ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags, 3998 sizeof(flush_cmd), &flush_cmd); 3999 if (ret) 4000 device_printf(sc->sc_dev, 4001 "Flushing tx queue failed: %d\n", ret); 4002 return ret; 4003 } 4004 4005 /* 4006 * BEGIN mvm/quota.c 4007 */ 4008 4009 static int 4010 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp) 4011 { 4012 struct iwm_time_quota_cmd_v1 cmd; 4013 int i, idx, ret, num_active_macs, quota, quota_rem; 4014 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, }; 4015 int n_ifs[IWM_MAX_BINDINGS] = {0, }; 4016 uint16_t id; 4017 4018 memset(&cmd, 0, sizeof(cmd)); 4019 4020 /* currently, PHY ID == binding ID */ 4021 if (ivp) { 4022 id = ivp->phy_ctxt->id; 4023 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id")); 4024 colors[id] = ivp->phy_ctxt->color; 4025 4026 if (1) 4027 n_ifs[id] = 1; 4028 } 4029 4030 /* 4031 * The FW's scheduling session consists of 4032 * IWM_MAX_QUOTA fragments. 
Divide these fragments 4033 * equally between all the bindings that require quota 4034 */ 4035 num_active_macs = 0; 4036 for (i = 0; i < IWM_MAX_BINDINGS; i++) { 4037 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID); 4038 num_active_macs += n_ifs[i]; 4039 } 4040 4041 quota = 0; 4042 quota_rem = 0; 4043 if (num_active_macs) { 4044 quota = IWM_MAX_QUOTA / num_active_macs; 4045 quota_rem = IWM_MAX_QUOTA % num_active_macs; 4046 } 4047 4048 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) { 4049 if (colors[i] < 0) 4050 continue; 4051 4052 cmd.quotas[idx].id_and_color = 4053 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i])); 4054 4055 if (n_ifs[i] <= 0) { 4056 cmd.quotas[idx].quota = htole32(0); 4057 cmd.quotas[idx].max_duration = htole32(0); 4058 } else { 4059 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]); 4060 cmd.quotas[idx].max_duration = htole32(0); 4061 } 4062 idx++; 4063 } 4064 4065 /* Give the remainder of the session to the first binding */ 4066 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem); 4067 4068 ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC, 4069 sizeof(cmd), &cmd); 4070 if (ret) 4071 device_printf(sc->sc_dev, 4072 "%s: Failed to send quota: %d\n", __func__, ret); 4073 return ret; 4074 } 4075 4076 /* 4077 * END mvm/quota.c 4078 */ 4079 4080 /* 4081 * ieee80211 routines 4082 */ 4083 4084 /* 4085 * Change to AUTH state in 80211 state machine. Roughly matches what 4086 * Linux does in bss_info_changed(). 4087 */ 4088 static int 4089 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc) 4090 { 4091 struct ieee80211_node *ni; 4092 struct iwm_node *in; 4093 struct iwm_vap *iv = IWM_VAP(vap); 4094 uint32_t duration; 4095 int error; 4096 4097 /* 4098 * XXX i have a feeling that the vap node is being 4099 * freed from underneath us. Grr. 4100 */ 4101 ni = ieee80211_ref_node(vap->iv_bss); 4102 in = IWM_NODE(ni); 4103 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE, 4104 "%s: called; vap=%p, bss ni=%p\n", 4105 __func__, 4106 vap, 4107 ni); 4108 IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n", 4109 __func__, ether_sprintf(ni->ni_bssid)); 4110 4111 in->in_assoc = 0; 4112 iv->iv_auth = 1; 4113 4114 /* 4115 * Firmware bug - it'll crash if the beacon interval is less 4116 * than 16. We can't avoid connecting at all, so refuse the 4117 * station state change, this will cause net80211 to abandon 4118 * attempts to connect to this AP, and eventually wpa_s will 4119 * blacklist the AP... 4120 */ 4121 if (ni->ni_intval < 16) { 4122 device_printf(sc->sc_dev, 4123 "AP %s beacon interval is %d, refusing due to firmware bug!\n", 4124 ether_sprintf(ni->ni_bssid), ni->ni_intval); 4125 error = EINVAL; 4126 goto out; 4127 } 4128 4129 error = iwm_allow_mcast(vap, sc); 4130 if (error) { 4131 device_printf(sc->sc_dev, 4132 "%s: failed to set multicast\n", __func__); 4133 goto out; 4134 } 4135 4136 /* 4137 * This is where it deviates from what Linux does. 4138 * 4139 * Linux iwlwifi doesn't reset the nic each time, nor does it 4140 * call ctxt_add() here. Instead, it adds it during vap creation, 4141 * and always does a mac_ctx_changed(). 4142 * 4143 * The openbsd port doesn't attempt to do that - it reset things 4144 * at odd states and does the add here. 4145 * 4146 * So, until the state handling is fixed (ie, we never reset 4147 * the NIC except for a firmware failure, which should drag 4148 * the NIC back to IDLE, re-setup and re-add all the mac/phy 4149 * contexts that are required), let's do a dirty hack here. 
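	 * (Concretely: iwm_mac_ctxt_changed() if the MAC context is
	 * already uploaded, iwm_mac_ctxt_add() otherwise.)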
4150 */ 4151 if (iv->is_uploaded) { 4152 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) { 4153 device_printf(sc->sc_dev, 4154 "%s: failed to update MAC\n", __func__); 4155 goto out; 4156 } 4157 } else { 4158 if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) { 4159 device_printf(sc->sc_dev, 4160 "%s: failed to add MAC\n", __func__); 4161 goto out; 4162 } 4163 } 4164 sc->sc_firmware_state = 1; 4165 4166 if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 4167 in->in_ni.ni_chan, 1, 1)) != 0) { 4168 device_printf(sc->sc_dev, 4169 "%s: failed update phy ctxt\n", __func__); 4170 goto out; 4171 } 4172 iv->phy_ctxt = &sc->sc_phyctxt[0]; 4173 4174 if ((error = iwm_binding_add_vif(sc, iv)) != 0) { 4175 device_printf(sc->sc_dev, 4176 "%s: binding update cmd\n", __func__); 4177 goto out; 4178 } 4179 sc->sc_firmware_state = 2; 4180 /* 4181 * Authentication becomes unreliable when powersaving is left enabled 4182 * here. Powersaving will be activated again when association has 4183 * finished or is aborted. 4184 */ 4185 iv->ps_disabled = TRUE; 4186 error = iwm_power_update_mac(sc); 4187 iv->ps_disabled = FALSE; 4188 if (error != 0) { 4189 device_printf(sc->sc_dev, 4190 "%s: failed to update power management\n", 4191 __func__); 4192 goto out; 4193 } 4194 if ((error = iwm_add_sta(sc, in)) != 0) { 4195 device_printf(sc->sc_dev, 4196 "%s: failed to add sta\n", __func__); 4197 goto out; 4198 } 4199 sc->sc_firmware_state = 3; 4200 4201 /* 4202 * Prevent the FW from wandering off channel during association 4203 * by "protecting" the session with a time event. 4204 */ 4205 /* XXX duration is in units of TU, not MS */ 4206 duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS; 4207 iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE); 4208 4209 error = 0; 4210 out: 4211 if (error != 0) 4212 iv->iv_auth = 0; 4213 ieee80211_free_node(ni); 4214 return (error); 4215 } 4216 4217 static struct ieee80211_node * 4218 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 4219 { 4220 return malloc(sizeof (struct iwm_node), M_80211_NODE, 4221 M_NOWAIT | M_ZERO); 4222 } 4223 4224 static uint8_t 4225 iwm_rate_from_ucode_rate(uint32_t rate_n_flags) 4226 { 4227 uint8_t plcp = rate_n_flags & 0xff; 4228 int i; 4229 4230 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4231 if (iwm_rates[i].plcp == plcp) 4232 return iwm_rates[i].rate; 4233 } 4234 return 0; 4235 } 4236 4237 uint8_t 4238 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx) 4239 { 4240 int i; 4241 uint8_t rval; 4242 4243 for (i = 0; i < rs->rs_nrates; i++) { 4244 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 4245 if (rval == iwm_rates[ridx].rate) 4246 return rs->rs_rates[i]; 4247 } 4248 4249 return 0; 4250 } 4251 4252 static int 4253 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate) 4254 { 4255 int i; 4256 4257 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4258 if (iwm_rates[i].rate == rate) 4259 return i; 4260 } 4261 4262 device_printf(sc->sc_dev, 4263 "%s: WARNING: device rate for %u not found!\n", 4264 __func__, rate); 4265 4266 return -1; 4267 } 4268 4269 4270 static void 4271 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix) 4272 { 4273 struct ieee80211_node *ni = &in->in_ni; 4274 struct iwm_lq_cmd *lq = &in->in_lq; 4275 struct ieee80211_rateset *rs = &ni->ni_rates; 4276 int nrates = rs->rs_nrates; 4277 int i, ridx, tab = 0; 4278 // int txant = 0; 4279 4280 KASSERT(rix >= 0 && rix < nrates, ("invalid rix")); 4281 4282 if (nrates > nitems(lq->rs_table)) { 4283 device_printf(sc->sc_dev, 4284 "%s: node supports %d rates, 
driver handles " 4285 "only %zu\n", __func__, nrates, nitems(lq->rs_table)); 4286 return; 4287 } 4288 if (nrates == 0) { 4289 device_printf(sc->sc_dev, 4290 "%s: node supports 0 rates, odd!\n", __func__); 4291 return; 4292 } 4293 nrates = imin(rix + 1, nrates); 4294 4295 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4296 "%s: nrates=%d\n", __func__, nrates); 4297 4298 /* then construct a lq_cmd based on those */ 4299 memset(lq, 0, sizeof(*lq)); 4300 lq->sta_id = IWM_STATION_ID; 4301 4302 /* For HT, always enable RTS/CTS to avoid excessive retries. */ 4303 if (ni->ni_flags & IEEE80211_NODE_HT) 4304 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK; 4305 4306 /* 4307 * are these used? (we don't do SISO or MIMO) 4308 * need to set them to non-zero, though, or we get an error. 4309 */ 4310 lq->single_stream_ant_msk = 1; 4311 lq->dual_stream_ant_msk = 1; 4312 4313 /* 4314 * Build the actual rate selection table. 4315 * The lowest bits are the rates. Additionally, 4316 * CCK needs bit 9 to be set. The rest of the bits 4317 * we add to the table select the tx antenna 4318 * Note that we add the rates in the highest rate first 4319 * (opposite of ni_rates). 4320 */ 4321 for (i = 0; i < nrates; i++) { 4322 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL; 4323 int nextant; 4324 4325 /* Map 802.11 rate to HW rate index. */ 4326 ridx = iwm_rate2ridx(sc, rate); 4327 if (ridx == -1) 4328 continue; 4329 4330 #if 0 4331 if (txant == 0) 4332 txant = iwm_get_valid_tx_ant(sc); 4333 nextant = 1<<(ffs(txant)-1); 4334 txant &= ~nextant; 4335 #else 4336 nextant = iwm_get_valid_tx_ant(sc); 4337 #endif 4338 tab = iwm_rates[ridx].plcp; 4339 tab |= nextant << IWM_RATE_MCS_ANT_POS; 4340 if (IWM_RIDX_IS_CCK(ridx)) 4341 tab |= IWM_RATE_MCS_CCK_MSK; 4342 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4343 "station rate i=%d, rate=%d, hw=%x\n", 4344 i, iwm_rates[ridx].rate, tab); 4345 lq->rs_table[i] = htole32(tab); 4346 } 4347 /* then fill the rest with the lowest possible rate */ 4348 for (i = nrates; i < nitems(lq->rs_table); i++) { 4349 KASSERT(tab != 0, ("invalid tab")); 4350 lq->rs_table[i] = htole32(tab); 4351 } 4352 } 4353 4354 static void 4355 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap) 4356 { 4357 struct iwm_vap *ivp = IWM_VAP(vap); 4358 int error; 4359 4360 /* Avoid Tx watchdog triggering, when transfers get dropped here. 
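	 * iwm_watchdog() only takes its "device timeout" path while
	 * sc_tx_timer is non-zero, so clearing it here disarms it.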
*/ 4361 sc->sc_tx_timer = 0; 4362 4363 ivp->iv_auth = 0; 4364 if (sc->sc_firmware_state == 3) { 4365 iwm_xmit_queue_drain(sc); 4366 // iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC); 4367 error = iwm_rm_sta(sc, vap, TRUE); 4368 if (error) { 4369 device_printf(sc->sc_dev, 4370 "%s: Failed to remove station: %d\n", 4371 __func__, error); 4372 } 4373 } 4374 if (sc->sc_firmware_state == 3) { 4375 error = iwm_mac_ctxt_changed(sc, vap); 4376 if (error) { 4377 device_printf(sc->sc_dev, 4378 "%s: Failed to change mac context: %d\n", 4379 __func__, error); 4380 } 4381 } 4382 if (sc->sc_firmware_state == 3) { 4383 error = iwm_sf_update(sc, vap, FALSE); 4384 if (error) { 4385 device_printf(sc->sc_dev, 4386 "%s: Failed to update smart FIFO: %d\n", 4387 __func__, error); 4388 } 4389 } 4390 if (sc->sc_firmware_state == 3) { 4391 error = iwm_rm_sta_id(sc, vap); 4392 if (error) { 4393 device_printf(sc->sc_dev, 4394 "%s: Failed to remove station id: %d\n", 4395 __func__, error); 4396 } 4397 } 4398 if (sc->sc_firmware_state == 3) { 4399 error = iwm_update_quotas(sc, NULL); 4400 if (error) { 4401 device_printf(sc->sc_dev, 4402 "%s: Failed to update PHY quota: %d\n", 4403 __func__, error); 4404 } 4405 } 4406 if (sc->sc_firmware_state == 3) { 4407 /* XXX Might need to specify bssid correctly. */ 4408 error = iwm_mac_ctxt_changed(sc, vap); 4409 if (error) { 4410 device_printf(sc->sc_dev, 4411 "%s: Failed to change mac context: %d\n", 4412 __func__, error); 4413 } 4414 } 4415 if (sc->sc_firmware_state == 3) { 4416 sc->sc_firmware_state = 2; 4417 } 4418 if (sc->sc_firmware_state > 1) { 4419 error = iwm_binding_remove_vif(sc, ivp); 4420 if (error) { 4421 device_printf(sc->sc_dev, 4422 "%s: Failed to remove channel ctx: %d\n", 4423 __func__, error); 4424 } 4425 } 4426 if (sc->sc_firmware_state > 1) { 4427 sc->sc_firmware_state = 1; 4428 } 4429 ivp->phy_ctxt = NULL; 4430 if (sc->sc_firmware_state > 0) { 4431 error = iwm_mac_ctxt_changed(sc, vap); 4432 if (error) { 4433 device_printf(sc->sc_dev, 4434 "%s: Failed to change mac context: %d\n", 4435 __func__, error); 4436 } 4437 } 4438 if (sc->sc_firmware_state > 0) { 4439 error = iwm_power_update_mac(sc); 4440 if (error != 0) { 4441 device_printf(sc->sc_dev, 4442 "%s: failed to update power management\n", 4443 __func__); 4444 } 4445 } 4446 sc->sc_firmware_state = 0; 4447 } 4448 4449 static int 4450 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4451 { 4452 struct iwm_vap *ivp = IWM_VAP(vap); 4453 struct ieee80211com *ic = vap->iv_ic; 4454 struct iwm_softc *sc = ic->ic_softc; 4455 struct iwm_node *in; 4456 int error; 4457 4458 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4459 "switching state %s -> %s arg=0x%x\n", 4460 ieee80211_state_name[vap->iv_state], 4461 ieee80211_state_name[nstate], 4462 arg); 4463 4464 IEEE80211_UNLOCK(ic); 4465 IWM_LOCK(sc); 4466 4467 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) && 4468 (nstate == IEEE80211_S_AUTH || 4469 nstate == IEEE80211_S_ASSOC || 4470 nstate == IEEE80211_S_RUN)) { 4471 /* Stop blinking for a scan, when authenticating. 
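		 * The LED is lit again by iwm_led_enable() once we
		 * reach RUN state below.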
*/ 4472 iwm_led_blink_stop(sc); 4473 } 4474 4475 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { 4476 iwm_led_disable(sc); 4477 /* disable beacon filtering if we're hopping out of RUN */ 4478 iwm_disable_beacon_filter(sc); 4479 if (((in = IWM_NODE(vap->iv_bss)) != NULL)) 4480 in->in_assoc = 0; 4481 } 4482 4483 if ((vap->iv_state == IEEE80211_S_AUTH || 4484 vap->iv_state == IEEE80211_S_ASSOC || 4485 vap->iv_state == IEEE80211_S_RUN) && 4486 (nstate == IEEE80211_S_INIT || 4487 nstate == IEEE80211_S_SCAN || 4488 nstate == IEEE80211_S_AUTH)) { 4489 iwm_stop_session_protection(sc, ivp); 4490 } 4491 4492 if ((vap->iv_state == IEEE80211_S_RUN || 4493 vap->iv_state == IEEE80211_S_ASSOC) && 4494 nstate == IEEE80211_S_INIT) { 4495 /* 4496 * In this case, iv_newstate() wants to send an 80211 frame on 4497 * the network that we are leaving. So we need to call it, 4498 * before tearing down all the firmware state. 4499 */ 4500 IWM_UNLOCK(sc); 4501 IEEE80211_LOCK(ic); 4502 ivp->iv_newstate(vap, nstate, arg); 4503 IEEE80211_UNLOCK(ic); 4504 IWM_LOCK(sc); 4505 iwm_bring_down_firmware(sc, vap); 4506 IWM_UNLOCK(sc); 4507 IEEE80211_LOCK(ic); 4508 return 0; 4509 } 4510 4511 switch (nstate) { 4512 case IEEE80211_S_INIT: 4513 case IEEE80211_S_SCAN: 4514 break; 4515 4516 case IEEE80211_S_AUTH: 4517 iwm_bring_down_firmware(sc, vap); 4518 if ((error = iwm_auth(vap, sc)) != 0) { 4519 device_printf(sc->sc_dev, 4520 "%s: could not move to auth state: %d\n", 4521 __func__, error); 4522 iwm_bring_down_firmware(sc, vap); 4523 IWM_UNLOCK(sc); 4524 IEEE80211_LOCK(ic); 4525 return 1; 4526 } 4527 break; 4528 4529 case IEEE80211_S_ASSOC: 4530 /* 4531 * EBS may be disabled due to previous failures reported by FW. 4532 * Reset EBS status here assuming environment has been changed. 
4533 */ 4534 sc->last_ebs_successful = TRUE; 4535 break; 4536 4537 case IEEE80211_S_RUN: 4538 in = IWM_NODE(vap->iv_bss); 4539 /* Update the association state, now we have it all */ 4540 /* (eg associd comes in at this point */ 4541 error = iwm_update_sta(sc, in); 4542 if (error != 0) { 4543 device_printf(sc->sc_dev, 4544 "%s: failed to update STA\n", __func__); 4545 IWM_UNLOCK(sc); 4546 IEEE80211_LOCK(ic); 4547 return error; 4548 } 4549 in->in_assoc = 1; 4550 error = iwm_mac_ctxt_changed(sc, vap); 4551 if (error != 0) { 4552 device_printf(sc->sc_dev, 4553 "%s: failed to update MAC: %d\n", __func__, error); 4554 } 4555 4556 iwm_sf_update(sc, vap, FALSE); 4557 iwm_enable_beacon_filter(sc, ivp); 4558 iwm_power_update_mac(sc); 4559 iwm_update_quotas(sc, ivp); 4560 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0); 4561 iwm_setrates(sc, in, rix); 4562 4563 if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) { 4564 device_printf(sc->sc_dev, 4565 "%s: IWM_LQ_CMD failed: %d\n", __func__, error); 4566 } 4567 4568 iwm_led_enable(sc); 4569 break; 4570 4571 default: 4572 break; 4573 } 4574 IWM_UNLOCK(sc); 4575 IEEE80211_LOCK(ic); 4576 4577 return (ivp->iv_newstate(vap, nstate, arg)); 4578 } 4579 4580 void 4581 iwm_endscan_cb(void *arg, int pending) 4582 { 4583 struct iwm_softc *sc = arg; 4584 struct ieee80211com *ic = &sc->sc_ic; 4585 4586 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE, 4587 "%s: scan ended\n", 4588 __func__); 4589 4590 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps)); 4591 } 4592 4593 static int 4594 iwm_send_bt_init_conf(struct iwm_softc *sc) 4595 { 4596 struct iwm_bt_coex_cmd bt_cmd; 4597 4598 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI); 4599 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET); 4600 4601 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), 4602 &bt_cmd); 4603 } 4604 4605 static boolean_t 4606 iwm_is_lar_supported(struct iwm_softc *sc) 4607 { 4608 boolean_t nvm_lar = sc->nvm_data->lar_enabled; 4609 boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT); 4610 4611 if (iwm_lar_disable) 4612 return FALSE; 4613 4614 /* 4615 * Enable LAR only if it is supported by the FW (TLV) && 4616 * enabled in the NVM 4617 */ 4618 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) 4619 return nvm_lar && tlv_lar; 4620 else 4621 return tlv_lar; 4622 } 4623 4624 static boolean_t 4625 iwm_is_wifi_mcc_supported(struct iwm_softc *sc) 4626 { 4627 return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) || 4628 iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC); 4629 } 4630 4631 static int 4632 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2) 4633 { 4634 struct iwm_mcc_update_cmd mcc_cmd; 4635 struct iwm_host_cmd hcmd = { 4636 .id = IWM_MCC_UPDATE_CMD, 4637 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB), 4638 .data = { &mcc_cmd }, 4639 }; 4640 int ret; 4641 #ifdef IWM_DEBUG 4642 struct iwm_rx_packet *pkt; 4643 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL; 4644 struct iwm_mcc_update_resp_v2 *mcc_resp; 4645 int n_channels; 4646 uint16_t mcc; 4647 #endif 4648 int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2); 4649 4650 if (!iwm_is_lar_supported(sc)) { 4651 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n", 4652 __func__); 4653 return 0; 4654 } 4655 4656 memset(&mcc_cmd, 0, sizeof(mcc_cmd)); 4657 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]); 4658 if (iwm_is_wifi_mcc_supported(sc)) 4659 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT; 4660 else 4661 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW; 4662 4663 
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels = le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels = le32toh(mcc_resp_v1->n_channels);
	}

	/* Workaround for a FW/NVM issue - the FW returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;	/* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}

static void
iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
{
	struct iwm_host_cmd cmd = {
		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
		.len = { sizeof(uint32_t), },
		.data = { &backoff, },
	};

	if (iwm_send_cmd(sc, &cmd) != 0) {
		device_printf(sc->sc_dev,
		    "failed to change thermal tx backoff\n");
	}
}

static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	sc->sf_state = IWM_SF_UNINIT;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * We should stop and restart the HW, since the INIT
	 * image has just been loaded.
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* Restart, this time with the regular firmware */
	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	error = iwm_sf_update(sc, NULL, FALSE);
	if (error)
		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
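		 * (Indexing ic_channels[] from 1 rather than 0 appears to
		 * be inherited from the OpenBSD port, where the channel
		 * array is indexed by channel number.)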
4789 */ 4790 if ((error = iwm_phy_ctxt_add(sc, 4791 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0) 4792 goto error; 4793 } 4794 4795 /* Initialize tx backoffs to the minimum. */ 4796 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 4797 iwm_tt_tx_backoff(sc, 0); 4798 4799 if (iwm_config_ltr(sc) != 0) 4800 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n"); 4801 4802 error = iwm_power_update_device(sc); 4803 if (error) 4804 goto error; 4805 4806 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0) 4807 goto error; 4808 4809 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) { 4810 if ((error = iwm_config_umac_scan(sc)) != 0) 4811 goto error; 4812 } 4813 4814 /* Enable Tx queues. */ 4815 for (ac = 0; ac < WME_NUM_AC; ac++) { 4816 error = iwm_enable_txq(sc, IWM_STATION_ID, ac, 4817 iwm_ac_to_tx_fifo[ac]); 4818 if (error) 4819 goto error; 4820 } 4821 4822 if ((error = iwm_disable_beacon_filter(sc)) != 0) { 4823 device_printf(sc->sc_dev, "failed to disable beacon filter\n"); 4824 goto error; 4825 } 4826 4827 return 0; 4828 4829 error: 4830 iwm_stop_device(sc); 4831 return error; 4832 } 4833 4834 /* Allow multicast from our BSSID. */ 4835 static int 4836 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc) 4837 { 4838 struct ieee80211_node *ni = vap->iv_bss; 4839 struct iwm_mcast_filter_cmd *cmd; 4840 size_t size; 4841 int error; 4842 4843 size = roundup(sizeof(*cmd), 4); 4844 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 4845 if (cmd == NULL) 4846 return ENOMEM; 4847 cmd->filter_own = 1; 4848 cmd->port_id = 0; 4849 cmd->count = 0; 4850 cmd->pass_all = 1; 4851 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid); 4852 4853 error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 4854 IWM_CMD_SYNC, size, cmd); 4855 free(cmd, M_DEVBUF); 4856 4857 return (error); 4858 } 4859 4860 /* 4861 * ifnet interfaces 4862 */ 4863 4864 static void 4865 iwm_init(struct iwm_softc *sc) 4866 { 4867 int error; 4868 4869 if (sc->sc_flags & IWM_FLAG_HW_INITED) { 4870 return; 4871 } 4872 sc->sc_generation++; 4873 sc->sc_flags &= ~IWM_FLAG_STOPPED; 4874 4875 if ((error = iwm_init_hw(sc)) != 0) { 4876 printf("iwm_init_hw failed %d\n", error); 4877 iwm_stop(sc); 4878 return; 4879 } 4880 4881 /* 4882 * Ok, firmware loaded and we are jogging 4883 */ 4884 sc->sc_flags |= IWM_FLAG_HW_INITED; 4885 } 4886 4887 static int 4888 iwm_transmit(struct ieee80211com *ic, struct mbuf *m) 4889 { 4890 struct iwm_softc *sc; 4891 int error; 4892 4893 sc = ic->ic_softc; 4894 4895 IWM_LOCK(sc); 4896 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 4897 IWM_UNLOCK(sc); 4898 return (ENXIO); 4899 } 4900 error = mbufq_enqueue(&sc->sc_snd, m); 4901 if (error) { 4902 IWM_UNLOCK(sc); 4903 return (error); 4904 } 4905 iwm_start(sc); 4906 IWM_UNLOCK(sc); 4907 return (0); 4908 } 4909 4910 /* 4911 * Dequeue packets from sendq and call send. 
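 * Runs with IWM_LOCK held and stops as soon as any TX ring is marked
 * full (sc->qfullmsk != 0); iwm_rx_tx_cmd() restarts it once the ring
 * drains below the low watermark.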
4912 */ 4913 static void 4914 iwm_start(struct iwm_softc *sc) 4915 { 4916 struct ieee80211_node *ni; 4917 struct mbuf *m; 4918 int ac = 0; 4919 4920 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__); 4921 while (sc->qfullmsk == 0 && 4922 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 4923 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4924 if (iwm_tx(sc, m, ni, ac) != 0) { 4925 if_inc_counter(ni->ni_vap->iv_ifp, 4926 IFCOUNTER_OERRORS, 1); 4927 ieee80211_free_node(ni); 4928 continue; 4929 } 4930 if (sc->sc_tx_timer == 0) { 4931 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, 4932 sc); 4933 } 4934 sc->sc_tx_timer = 15; 4935 } 4936 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__); 4937 } 4938 4939 static void 4940 iwm_stop(struct iwm_softc *sc) 4941 { 4942 4943 sc->sc_flags &= ~IWM_FLAG_HW_INITED; 4944 sc->sc_flags |= IWM_FLAG_STOPPED; 4945 sc->sc_generation++; 4946 iwm_led_blink_stop(sc); 4947 sc->sc_tx_timer = 0; 4948 iwm_stop_device(sc); 4949 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 4950 } 4951 4952 static void 4953 iwm_watchdog(void *arg) 4954 { 4955 struct iwm_softc *sc = arg; 4956 struct ieee80211com *ic = &sc->sc_ic; 4957 4958 if (sc->sc_attached == 0) 4959 return; 4960 4961 if (sc->sc_tx_timer > 0) { 4962 if (--sc->sc_tx_timer == 0) { 4963 device_printf(sc->sc_dev, "device timeout\n"); 4964 #ifdef IWM_DEBUG 4965 iwm_nic_error(sc); 4966 #endif 4967 ieee80211_restart_all(ic); 4968 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 4969 return; 4970 } 4971 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 4972 } 4973 } 4974 4975 static void 4976 iwm_parent(struct ieee80211com *ic) 4977 { 4978 struct iwm_softc *sc = ic->ic_softc; 4979 int startall = 0; 4980 int rfkill = 0; 4981 4982 IWM_LOCK(sc); 4983 if (ic->ic_nrunning > 0) { 4984 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) { 4985 iwm_init(sc); 4986 rfkill = iwm_check_rfkill(sc); 4987 if (!rfkill) 4988 startall = 1; 4989 } 4990 } else if (sc->sc_flags & IWM_FLAG_HW_INITED) 4991 iwm_stop(sc); 4992 IWM_UNLOCK(sc); 4993 if (startall) 4994 ieee80211_start_all(ic); 4995 else if (rfkill) 4996 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task); 4997 } 4998 4999 static void 5000 iwm_rftoggle_task(void *arg, int npending __unused) 5001 { 5002 struct iwm_softc *sc = arg; 5003 struct ieee80211com *ic = &sc->sc_ic; 5004 int rfkill; 5005 5006 IWM_LOCK(sc); 5007 rfkill = iwm_check_rfkill(sc); 5008 IWM_UNLOCK(sc); 5009 if (rfkill) { 5010 device_printf(sc->sc_dev, 5011 "%s: rfkill switch, disabling interface\n", __func__); 5012 ieee80211_suspend_all(ic); 5013 ieee80211_notify_radio(ic, 0); 5014 } else { 5015 device_printf(sc->sc_dev, 5016 "%s: rfkill cleared, re-enabling interface\n", __func__); 5017 ieee80211_resume_all(ic); 5018 ieee80211_notify_radio(ic, 1); 5019 } 5020 } 5021 5022 /* 5023 * The interrupt side of things 5024 */ 5025 5026 /* 5027 * error dumping routines are from iwlwifi/mvm/utils.c 5028 */ 5029 5030 /* 5031 * Note: This structure is read from the device with IO accesses, 5032 * and the reading already does the endian conversion. As it is 5033 * read with uint32_t-sized accesses, any members with a different size 5034 * need to be ordered correctly though! 
5035 */ 5036 struct iwm_error_event_table { 5037 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 5038 uint32_t error_id; /* type of error */ 5039 uint32_t trm_hw_status0; /* TRM HW status */ 5040 uint32_t trm_hw_status1; /* TRM HW status */ 5041 uint32_t blink2; /* branch link */ 5042 uint32_t ilink1; /* interrupt link */ 5043 uint32_t ilink2; /* interrupt link */ 5044 uint32_t data1; /* error-specific data */ 5045 uint32_t data2; /* error-specific data */ 5046 uint32_t data3; /* error-specific data */ 5047 uint32_t bcon_time; /* beacon timer */ 5048 uint32_t tsf_low; /* network timestamp function timer */ 5049 uint32_t tsf_hi; /* network timestamp function timer */ 5050 uint32_t gp1; /* GP1 timer register */ 5051 uint32_t gp2; /* GP2 timer register */ 5052 uint32_t fw_rev_type; /* firmware revision type */ 5053 uint32_t major; /* uCode version major */ 5054 uint32_t minor; /* uCode version minor */ 5055 uint32_t hw_ver; /* HW Silicon version */ 5056 uint32_t brd_ver; /* HW board version */ 5057 uint32_t log_pc; /* log program counter */ 5058 uint32_t frame_ptr; /* frame pointer */ 5059 uint32_t stack_ptr; /* stack pointer */ 5060 uint32_t hcmd; /* last host command header */ 5061 uint32_t isr0; /* isr status register LMPM_NIC_ISR0: 5062 * rxtx_flag */ 5063 uint32_t isr1; /* isr status register LMPM_NIC_ISR1: 5064 * host_flag */ 5065 uint32_t isr2; /* isr status register LMPM_NIC_ISR2: 5066 * enc_flag */ 5067 uint32_t isr3; /* isr status register LMPM_NIC_ISR3: 5068 * time_flag */ 5069 uint32_t isr4; /* isr status register LMPM_NIC_ISR4: 5070 * wico interrupt */ 5071 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */ 5072 uint32_t wait_event; /* wait event() caller address */ 5073 uint32_t l2p_control; /* L2pControlField */ 5074 uint32_t l2p_duration; /* L2pDurationField */ 5075 uint32_t l2p_mhvalid; /* L2pMhValidBits */ 5076 uint32_t l2p_addr_match; /* L2pAddrMatchStat */ 5077 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on 5078 * (LMPM_PMG_SEL) */ 5079 uint32_t u_timestamp; /* indicate when the date and time of the 5080 * compilation */ 5081 uint32_t flow_handler; /* FH read/write pointers, RX credit */ 5082 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; 5083 5084 /* 5085 * UMAC error struct - relevant starting from family 8000 chip. 5086 * Note: This structure is read from the device with IO accesses, 5087 * and the reading already does the endian conversion. As it is 5088 * read with u32-sized accesses, any members with a different size 5089 * need to be ordered correctly though! 
5090 */ 5091 struct iwm_umac_error_event_table { 5092 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 5093 uint32_t error_id; /* type of error */ 5094 uint32_t blink1; /* branch link */ 5095 uint32_t blink2; /* branch link */ 5096 uint32_t ilink1; /* interrupt link */ 5097 uint32_t ilink2; /* interrupt link */ 5098 uint32_t data1; /* error-specific data */ 5099 uint32_t data2; /* error-specific data */ 5100 uint32_t data3; /* error-specific data */ 5101 uint32_t umac_major; 5102 uint32_t umac_minor; 5103 uint32_t frame_pointer; /* core register 27*/ 5104 uint32_t stack_pointer; /* core register 28 */ 5105 uint32_t cmd_header; /* latest host cmd sent to UMAC */ 5106 uint32_t nic_isr_pref; /* ISR status register */ 5107 } __packed; 5108 5109 #define ERROR_START_OFFSET (1 * sizeof(uint32_t)) 5110 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t)) 5111 5112 #ifdef IWM_DEBUG 5113 struct { 5114 const char *name; 5115 uint8_t num; 5116 } advanced_lookup[] = { 5117 { "NMI_INTERRUPT_WDG", 0x34 }, 5118 { "SYSASSERT", 0x35 }, 5119 { "UCODE_VERSION_MISMATCH", 0x37 }, 5120 { "BAD_COMMAND", 0x38 }, 5121 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 5122 { "FATAL_ERROR", 0x3D }, 5123 { "NMI_TRM_HW_ERR", 0x46 }, 5124 { "NMI_INTERRUPT_TRM", 0x4C }, 5125 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 5126 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 5127 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 5128 { "NMI_INTERRUPT_HOST", 0x66 }, 5129 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 5130 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 5131 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 5132 { "ADVANCED_SYSASSERT", 0 }, 5133 }; 5134 5135 static const char * 5136 iwm_desc_lookup(uint32_t num) 5137 { 5138 int i; 5139 5140 for (i = 0; i < nitems(advanced_lookup) - 1; i++) 5141 if (advanced_lookup[i].num == num) 5142 return advanced_lookup[i].name; 5143 5144 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 5145 return advanced_lookup[i].name; 5146 } 5147 5148 static void 5149 iwm_nic_umac_error(struct iwm_softc *sc) 5150 { 5151 struct iwm_umac_error_event_table table; 5152 uint32_t base; 5153 5154 base = sc->umac_error_event_table; 5155 5156 if (base < 0x800000) { 5157 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n", 5158 base); 5159 return; 5160 } 5161 5162 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5163 device_printf(sc->sc_dev, "reading errlog failed\n"); 5164 return; 5165 } 5166 5167 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5168 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n"); 5169 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5170 sc->sc_flags, table.valid); 5171 } 5172 5173 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id, 5174 iwm_desc_lookup(table.error_id)); 5175 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1); 5176 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2); 5177 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n", 5178 table.ilink1); 5179 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n", 5180 table.ilink2); 5181 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1); 5182 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2); 5183 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3); 5184 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major); 5185 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor); 5186 device_printf(sc->sc_dev, "0x%08X | frame pointer\n", 5187 table.frame_pointer); 5188 
device_printf(sc->sc_dev, "0x%08X | stack pointer\n", 5189 table.stack_pointer); 5190 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header); 5191 device_printf(sc->sc_dev, "0x%08X | isr status reg\n", 5192 table.nic_isr_pref); 5193 } 5194 5195 /* 5196 * Support for dumping the error log seemed like a good idea ... 5197 * but it's mostly hex junk and the only sensible thing is the 5198 * hw/ucode revision (which we know anyway). Since it's here, 5199 * I'll just leave it in, just in case e.g. the Intel guys want to 5200 * help us decipher some "ADVANCED_SYSASSERT" later. 5201 */ 5202 static void 5203 iwm_nic_error(struct iwm_softc *sc) 5204 { 5205 struct iwm_error_event_table table; 5206 uint32_t base; 5207 5208 device_printf(sc->sc_dev, "dumping device error log\n"); 5209 base = sc->error_event_table[0]; 5210 if (base < 0x800000) { 5211 device_printf(sc->sc_dev, 5212 "Invalid error log pointer 0x%08x\n", base); 5213 return; 5214 } 5215 5216 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5217 device_printf(sc->sc_dev, "reading errlog failed\n"); 5218 return; 5219 } 5220 5221 if (!table.valid) { 5222 device_printf(sc->sc_dev, "errlog not found, skipping\n"); 5223 return; 5224 } 5225 5226 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5227 device_printf(sc->sc_dev, "Start Error Log Dump:\n"); 5228 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5229 sc->sc_flags, table.valid); 5230 } 5231 5232 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id, 5233 iwm_desc_lookup(table.error_id)); 5234 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n", 5235 table.trm_hw_status0); 5236 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n", 5237 table.trm_hw_status1); 5238 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2); 5239 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1); 5240 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2); 5241 device_printf(sc->sc_dev, "%08X | data1\n", table.data1); 5242 device_printf(sc->sc_dev, "%08X | data2\n", table.data2); 5243 device_printf(sc->sc_dev, "%08X | data3\n", table.data3); 5244 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time); 5245 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low); 5246 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi); 5247 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1); 5248 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2); 5249 device_printf(sc->sc_dev, "%08X | uCode revision type\n", 5250 table.fw_rev_type); 5251 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major); 5252 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor); 5253 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver); 5254 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver); 5255 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd); 5256 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0); 5257 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1); 5258 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2); 5259 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3); 5260 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4); 5261 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id); 5262 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event); 5263 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control); 5264 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration); 5265 
device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid); 5266 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match); 5267 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 5268 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp); 5269 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler); 5270 5271 if (sc->umac_error_event_table) 5272 iwm_nic_umac_error(sc); 5273 } 5274 #endif 5275 5276 static void 5277 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m) 5278 { 5279 struct ieee80211com *ic = &sc->sc_ic; 5280 struct iwm_cmd_response *cresp; 5281 struct mbuf *m1; 5282 uint32_t offset = 0; 5283 uint32_t maxoff = IWM_RBUF_SIZE; 5284 uint32_t nextoff; 5285 boolean_t stolen = FALSE; 5286 5287 #define HAVEROOM(a) \ 5288 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff) 5289 5290 while (HAVEROOM(offset)) { 5291 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, 5292 offset); 5293 int qid, idx, code, len; 5294 5295 qid = pkt->hdr.qid; 5296 idx = pkt->hdr.idx; 5297 5298 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code); 5299 5300 /* 5301 * randomly get these from the firmware, no idea why. 5302 * they at least seem harmless, so just ignore them for now 5303 */ 5304 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) || 5305 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) { 5306 break; 5307 } 5308 5309 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5310 "rx packet qid=%d idx=%d type=%x\n", 5311 qid & ~0x80, pkt->hdr.idx, code); 5312 5313 len = iwm_rx_packet_len(pkt); 5314 len += sizeof(uint32_t); /* account for status word */ 5315 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN); 5316 5317 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt); 5318 5319 switch (code) { 5320 case IWM_REPLY_RX_PHY_CMD: 5321 iwm_rx_rx_phy_cmd(sc, pkt); 5322 break; 5323 5324 case IWM_REPLY_RX_MPDU_CMD: { 5325 /* 5326 * If this is the last frame in the RX buffer, we 5327 * can directly feed the mbuf to the sharks here. 5328 */ 5329 struct iwm_rx_packet *nextpkt = mtodoff(m, 5330 struct iwm_rx_packet *, nextoff); 5331 if (!HAVEROOM(nextoff) || 5332 (nextpkt->hdr.code == 0 && 5333 (nextpkt->hdr.qid & ~0x80) == 0 && 5334 nextpkt->hdr.idx == 0) || 5335 (nextpkt->len_n_flags == 5336 htole32(IWM_FH_RSCSR_FRAME_INVALID))) { 5337 if (iwm_rx_mpdu(sc, m, offset, stolen)) { 5338 stolen = FALSE; 5339 /* Make sure we abort the loop */ 5340 nextoff = maxoff; 5341 } 5342 break; 5343 } 5344 5345 /* 5346 * Use m_copym instead of m_split, because that 5347 * makes it easier to keep a valid rx buffer in 5348 * the ring, when iwm_rx_mpdu() fails. 5349 * 5350 * We need to start m_copym() at offset 0, to get the 5351 * M_PKTHDR flag preserved. 
5352 */ 5353 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT); 5354 if (m1) { 5355 if (iwm_rx_mpdu(sc, m1, offset, stolen)) 5356 stolen = TRUE; 5357 else 5358 m_freem(m1); 5359 } 5360 break; 5361 } 5362 5363 case IWM_TX_CMD: 5364 iwm_rx_tx_cmd(sc, pkt); 5365 break; 5366 5367 case IWM_MISSED_BEACONS_NOTIFICATION: { 5368 struct iwm_missed_beacons_notif *resp; 5369 int missed; 5370 5371 /* XXX look at mac_id to determine interface ID */ 5372 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5373 5374 resp = (void *)pkt->data; 5375 missed = le32toh(resp->consec_missed_beacons); 5376 5377 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE, 5378 "%s: MISSED_BEACON: mac_id=%d, " 5379 "consec_since_last_rx=%d, consec=%d, num_expect=%d " 5380 "num_rx=%d\n", 5381 __func__, 5382 le32toh(resp->mac_id), 5383 le32toh(resp->consec_missed_beacons_since_last_rx), 5384 le32toh(resp->consec_missed_beacons), 5385 le32toh(resp->num_expected_beacons), 5386 le32toh(resp->num_recvd_beacons)); 5387 5388 /* Be paranoid */ 5389 if (vap == NULL) 5390 break; 5391 5392 /* XXX no net80211 locking? */ 5393 if (vap->iv_state == IEEE80211_S_RUN && 5394 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 5395 if (missed > vap->iv_bmissthreshold) { 5396 /* XXX bad locking; turn into task */ 5397 IWM_UNLOCK(sc); 5398 ieee80211_beacon_miss(ic); 5399 IWM_LOCK(sc); 5400 } 5401 } 5402 5403 break; 5404 } 5405 5406 case IWM_MFUART_LOAD_NOTIFICATION: 5407 break; 5408 5409 case IWM_ALIVE: 5410 break; 5411 5412 case IWM_CALIB_RES_NOTIF_PHY_DB: 5413 break; 5414 5415 case IWM_STATISTICS_NOTIFICATION: 5416 iwm_handle_rx_statistics(sc, pkt); 5417 break; 5418 5419 case IWM_NVM_ACCESS_CMD: 5420 case IWM_MCC_UPDATE_CMD: 5421 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5422 memcpy(sc->sc_cmd_resp, 5423 pkt, sizeof(sc->sc_cmd_resp)); 5424 } 5425 break; 5426 5427 case IWM_MCC_CHUB_UPDATE_CMD: { 5428 struct iwm_mcc_chub_notif *notif; 5429 notif = (void *)pkt->data; 5430 5431 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8; 5432 sc->sc_fw_mcc[1] = notif->mcc & 0xff; 5433 sc->sc_fw_mcc[2] = '\0'; 5434 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 5435 "fw source %d sent CC '%s'\n", 5436 notif->source_id, sc->sc_fw_mcc); 5437 break; 5438 } 5439 5440 case IWM_DTS_MEASUREMENT_NOTIFICATION: 5441 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP, 5442 IWM_DTS_MEASUREMENT_NOTIF_WIDE): { 5443 struct iwm_dts_measurement_notif_v1 *notif; 5444 5445 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) { 5446 device_printf(sc->sc_dev, 5447 "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); 5448 break; 5449 } 5450 notif = (void *)pkt->data; 5451 IWM_DPRINTF(sc, IWM_DEBUG_TEMP, 5452 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n", 5453 notif->temp); 5454 break; 5455 } 5456 5457 case IWM_PHY_CONFIGURATION_CMD: 5458 case IWM_TX_ANT_CONFIGURATION_CMD: 5459 case IWM_ADD_STA: 5460 case IWM_MAC_CONTEXT_CMD: 5461 case IWM_REPLY_SF_CFG_CMD: 5462 case IWM_POWER_TABLE_CMD: 5463 case IWM_LTR_CONFIG: 5464 case IWM_PHY_CONTEXT_CMD: 5465 case IWM_BINDING_CONTEXT_CMD: 5466 case IWM_TIME_EVENT_CMD: 5467 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD): 5468 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC): 5469 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC): 5470 case IWM_SCAN_OFFLOAD_REQUEST_CMD: 5471 case IWM_SCAN_OFFLOAD_ABORT_CMD: 5472 case IWM_REPLY_BEACON_FILTERING_CMD: 5473 case IWM_MAC_PM_POWER_TABLE: 5474 case IWM_TIME_QUOTA_CMD: 5475 case IWM_REMOVE_STA: 5476 case IWM_TXPATH_FLUSH: 5477 case IWM_LQ_CMD: 5478 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, 5479 IWM_FW_PAGING_BLOCK_CMD): 5480 
case IWM_BT_CONFIG: 5481 case IWM_REPLY_THERMAL_MNG_BACKOFF: 5482 cresp = (void *)pkt->data; 5483 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5484 memcpy(sc->sc_cmd_resp, 5485 pkt, sizeof(*pkt)+sizeof(*cresp)); 5486 } 5487 break; 5488 5489 /* ignore */ 5490 case IWM_PHY_DB_CMD: 5491 break; 5492 5493 case IWM_INIT_COMPLETE_NOTIF: 5494 break; 5495 5496 case IWM_SCAN_OFFLOAD_COMPLETE: 5497 iwm_rx_lmac_scan_complete_notif(sc, pkt); 5498 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5499 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5500 ieee80211_runtask(ic, &sc->sc_es_task); 5501 } 5502 break; 5503 5504 case IWM_SCAN_ITERATION_COMPLETE: { 5505 break; 5506 } 5507 5508 case IWM_SCAN_COMPLETE_UMAC: 5509 iwm_rx_umac_scan_complete_notif(sc, pkt); 5510 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5511 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5512 ieee80211_runtask(ic, &sc->sc_es_task); 5513 } 5514 break; 5515 5516 case IWM_SCAN_ITERATION_COMPLETE_UMAC: { 5517 #ifdef IWM_DEBUG 5518 struct iwm_umac_scan_iter_complete_notif *notif; 5519 notif = (void *)pkt->data; 5520 5521 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration " 5522 "complete, status=0x%x, %d channels scanned\n", 5523 notif->status, notif->scanned_channels); 5524 #endif 5525 break; 5526 } 5527 5528 case IWM_REPLY_ERROR: { 5529 struct iwm_error_resp *resp; 5530 resp = (void *)pkt->data; 5531 5532 device_printf(sc->sc_dev, 5533 "firmware error 0x%x, cmd 0x%x\n", 5534 le32toh(resp->error_type), 5535 resp->cmd_id); 5536 break; 5537 } 5538 5539 case IWM_TIME_EVENT_NOTIFICATION: 5540 iwm_rx_time_event_notif(sc, pkt); 5541 break; 5542 5543 /* 5544 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG 5545 * messages. Just ignore them for now. 5546 */ 5547 case IWM_DEBUG_LOG_MSG: 5548 break; 5549 5550 case IWM_MCAST_FILTER_CMD: 5551 break; 5552 5553 case IWM_SCD_QUEUE_CFG: { 5554 #ifdef IWM_DEBUG 5555 struct iwm_scd_txq_cfg_rsp *rsp; 5556 rsp = (void *)pkt->data; 5557 5558 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 5559 "queue cfg token=0x%x sta_id=%d " 5560 "tid=%d scd_queue=%d\n", 5561 rsp->token, rsp->sta_id, rsp->tid, 5562 rsp->scd_queue); 5563 #endif 5564 break; 5565 } 5566 5567 default: 5568 device_printf(sc->sc_dev, 5569 "code %x, frame %d/%d %x unhandled\n", 5570 code, qid & ~0x80, idx, pkt->len_n_flags); 5571 break; 5572 } 5573 5574 /* 5575 * Why test bit 0x80? The Linux driver: 5576 * 5577 * There is one exception: uCode sets bit 15 when it 5578 * originates the response/notification, i.e. when the 5579 * response/notification is not a direct response to a 5580 * command sent by the driver. For example, uCode issues 5581 * IWM_REPLY_RX when it sends a received frame to the driver; 5582 * it is not a direct response to any driver command. 5583 * 5584 * Ok, so since when is 7 == 15? Well, the Linux driver 5585 * uses a slightly different format for pkt->hdr, and "qid" 5586 * is actually the upper byte of a two-byte field. 5587 */ 5588 if (!(qid & (1 << 7))) 5589 iwm_cmd_done(sc, pkt); 5590 5591 offset = nextoff; 5592 } 5593 if (stolen) 5594 m_freem(m); 5595 #undef HAVEROOM 5596 } 5597 5598 /* 5599 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt. 
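 * The firmware reports its RX write pointer via closed_rb_num in the
 * status area; we walk the ring up to that point and hand each buffer
 * to iwm_handle_rxb().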
/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	int count;
	uint32_t wreg;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
	}

	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
		iwm_handle_rxb(sc, data->m);

		ring->cur = (ring->cur + 1) % count;
	}

	/*
	 * Tell the firmware that it can reuse the ring entries that
	 * we have just processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? count - 1 : hw - 1;
	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
}

static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2;
	int isperiodic = 0;

	IWM_LOCK(sc);
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    " tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    " rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Reset our firmware state tracking. */
		sc->sc_firmware_state = 0;
		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev,
		    "%s: controller panicked, iv_state = %d; restarting\n",
		    __func__, vap->iv_state);

		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
out_ena:
	iwm_restore_interrupts(sc);
out:
	IWM_UNLOCK(sc);
	return;
}
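
/*
 * Worked example for the ICT remap in iwm_intr() above (illustrative
 * only): the ICT table delivers the interrupt bits compressed into the
 * low two bytes, so an accumulated r1 of 0x0000c0a5 is expanded as
 *
 *	(0xc0a5 & 0x00ff)       == 0x000000a5   (byte 0 stays put)
 *	(0xc0a5 & 0xff00) << 16 == 0xc0000000   (byte 1 moves to the top)
 *
 * giving 0xc00000a5, which can then be tested against the regular
 * IWM_CSR_INT_BIT_* masks.
 */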

/*
 * Autoconf glue-sniffing
 */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
#define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
#define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
#define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
#define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
#define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526

static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
	{ PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
};

static int
iwm_probe(device_t dev)
{
	int i;

	for (i = 0; i < nitems(iwm_devices); i++) {
		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
		    pci_get_device(dev) == iwm_devices[i].device) {
			device_set_desc(dev, iwm_devices[i].cfg->name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
iwm_dev_check(device_t dev)
{
	struct iwm_softc *sc;
	uint16_t devid;
	int i;

	sc = device_get_softc(dev);

	devid = pci_get_device(dev);
	for (i = 0; i < nitems(iwm_devices); i++) {
		if (iwm_devices[i].device == devid) {
			sc->cfg = iwm_devices[i].cfg;
			return (0);
		}
	}
	device_printf(dev, "unknown adapter type\n");
	return (ENXIO);
}
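
/*
 * Note on the interrupt setup in iwm_pci_attach() below: if
 * pci_alloc_msi() succeeds, the MSI message lives at rid 1 and the
 * vector is exclusively ours; otherwise we fall back to the legacy
 * INTx line at rid 0, which is allocated RF_SHAREABLE because INTx
 * lines may be shared with other devices.
 */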

/* PCI registers */
#define	PCI_CFG_RETRY_TIMEOUT	0x041

static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;

	sc = device_get_softc(dev);

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt\n");
		return (error);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}

static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
}

static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);

	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
	if (error != 0) {
		device_printf(dev, "can't start taskq thread, error %d\n",
		    error);
		goto fail;
	}

	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (no more
	 * "dash" value).  To keep hw_rev backwards compatible, we store it
	 * in the old format.
	 */
	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			device_printf(dev, "could not initialize hardware\n");
			goto fail;
		}
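
		/*
		 * Sketch of the repacking above, going by the iwlwifi
		 * layout (an assumption, not verified against every
		 * part): 8000-family chips report the silicon step in
		 * bits 0-1 of CSR_HW_REV, while the rest of this driver
		 * expects the older format with the step in bits 2-3.
		 * The expression shifts the step up into bits 2-3 and
		 * leaves bits 0-1 (the old "dash" field) cleared.
		 */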

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
	    IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	error = iwm_read_firmware(sc);
	if (error) {
		goto fail;
	} else if (sc->sc_fw.fw_fp == NULL) {
		/*
		 * XXX Add a solution for properly deferring firmware load
		 *     during bootup.
		 */
		goto fail;
	} else {
		sc->sc_preinit_hook.ich_func = iwm_preinit;
		sc->sc_preinit_hook.ich_arg = sc;
		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
			device_printf(dev,
			    "config_intrhook_establish failed\n");
			goto fail;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return (0);

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return (ENXIO);
}

static int
iwm_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
		return (FALSE);

	return (TRUE);
}

static int
iwm_wme_update(struct ieee80211com *ic)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwm_softc *sc = ic->ic_softc;
	struct chanAccParams chp;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct iwm_node *in;
	struct wmeParams tmp[WME_NUM_AC];
	int aci, error;

	if (vap == NULL)
		return (0);

	ieee80211_wme_ic_getparams(ic, &chp);

	IEEE80211_LOCK(ic);
	for (aci = 0; aci < WME_NUM_AC; aci++)
		tmp[aci] = chp.cap_wmeParams[aci];
	IEEE80211_UNLOCK(ic);

	IWM_LOCK(sc);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac = &tmp[aci];
		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
		ivp->queue_params[aci].edca_txop =
		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
	}
	ivp->have_wme = TRUE;
	if (ivp->is_uploaded && vap->iv_bss != NULL) {
		in = IWM_NODE(vap->iv_bss);
		if (in->in_assoc) {
			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update MAC\n", __func__);
			}
		}
	}
	IWM_UNLOCK(sc);

	return (0);
#undef IWM_EXP2
}
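
/*
 * Worked example for IWM_EXP2 above (the values are the usual 802.11
 * best-effort defaults, cited for illustration): net80211 stores
 * contention windows as exponents, so an ECWmin of 4 becomes
 * cw_min = 2^4 - 1 = 15 and an ECWmax of 10 becomes
 * cw_max = 2^10 - 1 = 1023, the linear values carried in the
 * firmware's EDCA queue parameters.
 */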

static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	error = iwm_run_init_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s end\n", __func__);
}

static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ivp->id = IWM_DEFAULT_MACID;
	ivp->color = IWM_DEFAULT_COLOR;

	ivp->have_wme = FALSE;
	ivp->ps_disabled = FALSE;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status, mac);
	ic->ic_opmode = opmode;

	return vap;
}

static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

static void
iwm_xmit_queue_drain(struct iwm_softc *sc)
{
	struct mbuf *m;
	struct ieee80211_node *ni;

	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		ieee80211_free_node(ni);
		m_freem(m);
	}
}

static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_umac_scan(sc);
	else
		error = iwm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}

static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
		 * both iwm_scan_end and iwm_scan_start run in the
		 * ic->ic_tq taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}

static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}

void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
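
/*
 * A note on the suspend/resume handshake implemented below: after
 * stopping the device, iwm_suspend() repurposes IWM_FLAG_SCANNING as
 * a "was running" marker, and iwm_resume() checks and clears it to
 * decide whether net80211 state needs to be brought back with
 * ieee80211_resume_all().
 */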
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	if (!sc->sc_attached)
		return (0);

	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return (0);
}

static int
iwm_suspend(device_t dev)
{
	int do_stop = 0;
	struct iwm_softc *sc = device_get_softc(dev);

	do_stop = !!(sc->sc_ic.ic_nrunning > 0);

	if (!sc->sc_attached)
		return (0);

	ieee80211_suspend_all(&sc->sc_ic);

	if (do_stop) {
		IWM_LOCK(sc);
		iwm_stop(sc);
		sc->sc_flags |= IWM_FLAG_SCANNING;
		IWM_UNLOCK(sc);
	}

	return (0);
}

static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return (0);
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	iwm_stop_device(sc);
	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}

static int
iwm_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	return (iwm_detach_local(sc, 1));
}

static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

DRIVER_MODULE(iwm, pci, iwm_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
    iwm_devices, nitems(iwm_devices));
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);