/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define	mtodoff(m, t, off)	((t)((m)->m_data + (off)))

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
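
/*
 * The "rate" field above is in units of 500 kb/s, the same convention
 * net80211 uses for supported-rate elements: 2 is 1 Mb/s, 108 is 54 Mb/s.
 * CCK entries come first; OFDM entries start at IWM_RIDX_OFDM.
 */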

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *,
		    const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
		    const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
		    const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
		    struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
		    const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
		    bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
		    const struct iwm_fw_img *, int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
		    const struct iwm_fw_img *, int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
		    const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
		    const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_mvm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, boolean_t);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
	iwm_node_alloc(struct ieee80211vap *,
		    const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
	iwm_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int,
		    enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
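
/*
 * hw.iwm.lar.disable is a loader(8) tunable; setting it to 1 (e.g. in
 * loader.conf(5)) forces location-aware regulatory (LAR) off even on
 * firmware that advertises support for it.
 */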

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default calibration.\n",
		    ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}
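
/*
 * Load the firmware image with firmware(9) and walk its TLV layout: an
 * iwm_tlv_ucode_header (a zero word, the IWM_TLV_UCODE_MAGIC value and
 * a version word) followed by (type, length, data) records, each record
 * padded to a 4-byte boundary.
 */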
static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		error = ENOENT;	/* don't return success without firmware */
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
				    TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
				    TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
				    TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
			    IWM_FW_PHY_CFG_TX_CHAIN) >>
			    IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
			    IWM_FW_PHY_CFG_RX_CHAIN) >>
			    IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
				    "%s: Paging: driver supports up to %u bytes for paging image\n",
				    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
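
/*
 * The RX ring consists of IWM_RX_RING_COUNT descriptors, each a single
 * 32-bit word holding the bus address of an RX buffer (the hardware is
 * handed that address shifted right by 8 bits), plus a separate status
 * area through which the device reports its current write pointer.
 */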
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
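
/*
 * iwm_reset_tx_ring() below drops any queued mbufs and clears the
 * descriptors but keeps the DMA allocations, so the ring can be reused;
 * the allocations are only torn down in iwm_free_tx_ring().
 */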
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
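
/*
 * With ICT enabled the device DMAs interrupt causes into the ICT table
 * instead of the handler having to read IWM_CSR_INT across the bus;
 * sc->ict_cur tracks the driver's read index into that table.
 */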
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again here.
	 */
	iwm_disable_interrupts(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
/* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |	/* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
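
/*
 * iwm_nic_tx_init() points the hardware at the "keep warm" page and at
 * each TX ring's descriptor array; the scheduler itself is programmed
 * later, from iwm_trans_pcie_fw_alive(), once firmware is up.
 */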
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
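
/*
 * iwm_nic_init() brings the device up far enough to talk to firmware:
 * APM init, RX/TX ring programming and, per iwlwifi, enabling shadow
 * registers so some register updates need not wake the MAC.
 */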
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* Deactivate before configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
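
/*
 * Called once the firmware has reported ALIVE: reset the ICT table,
 * clear the scheduler context area in device SRAM, point the scheduler
 * at the byte-count tables, and enable the command queue and the FH
 * TX DMA channels.
 */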
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
    uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver already read
			 * valid data from another chunk, so this case is not
			 * an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
			    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}

/*
 * Reads an NVM section completely.
 * NICs prior to 7000 family don't have a real NVM, but just read
 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
 * by uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as we can,
 * without overflowing, so no check is needed.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
    uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
{
	uint16_t seglen, length, offset = 0;
	int ret;

	/* Set nvm section read length */
	length = IWM_NVM_DEFAULT_CHUNK_SIZE;

	seglen = length;

	/* Read the NVM until exhausted (reading less than requested) */
	while (seglen == length) {
		/* Check no memory assumptions fail and cause an overflow */
		if ((size_read + offset + length) >
		    sc->cfg->eeprom_size) {
			device_printf(sc->sc_dev,
			    "EEPROM size is too small for NVM\n");
			return ENOBUFS;
		}

		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
		if (ret) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
			    "Cannot read NVM from section %d offset %d, length %d\n",
			    section, offset, length);
			return ret;
		}
		offset += seglen;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
	    "NVM section %d read completed\n", section);
	*len = offset;
	return 0;
}

/*
 * BEGIN IWM_NVM_PARSE
 */

/* iwlwifi/iwl-nvm-parse.c */

/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};

enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
NVM SW-Section offset (in words) definitions */ 1848 IWM_NVM_SW_SECTION_8000 = 0x1C0, 1849 IWM_NVM_VERSION_8000 = 0, 1850 IWM_RADIO_CFG_8000 = 0, 1851 IWM_SKU_8000 = 2, 1852 IWM_N_HW_ADDRS_8000 = 3, 1853 1854 /* NVM REGULATORY -Section offset (in words) definitions */ 1855 IWM_NVM_CHANNELS_8000 = 0, 1856 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7, 1857 IWM_NVM_LAR_OFFSET_8000 = 0x507, 1858 IWM_NVM_LAR_ENABLED_8000 = 0x7, 1859 1860 /* NVM calibration section offset (in words) definitions */ 1861 IWM_NVM_CALIB_SECTION_8000 = 0x2B8, 1862 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000 1863 }; 1864 1865 /* SKU Capabilities (actual values from NVM definition) */ 1866 enum nvm_sku_bits { 1867 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0), 1868 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1), 1869 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2), 1870 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3), 1871 }; 1872 1873 /* radio config bits (actual values from NVM definition) */ 1874 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ 1875 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ 1876 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ 1877 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ 1878 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ 1879 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ 1880 1881 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF) 1882 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF) 1883 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF) 1884 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF) 1885 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF) 1886 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF) 1887 1888 /** 1889 * enum iwm_nvm_channel_flags - channel flags in NVM 1890 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo 1891 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel 1892 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed 1893 * @IWM_NVM_CHANNEL_RADAR: radar detection required 1894 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c 1895 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate 1896 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?) 1897 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) 1898 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?) 1899 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?) 1900 */ 1901 enum iwm_nvm_channel_flags { 1902 IWM_NVM_CHANNEL_VALID = (1 << 0), 1903 IWM_NVM_CHANNEL_IBSS = (1 << 1), 1904 IWM_NVM_CHANNEL_ACTIVE = (1 << 3), 1905 IWM_NVM_CHANNEL_RADAR = (1 << 4), 1906 IWM_NVM_CHANNEL_DFS = (1 << 7), 1907 IWM_NVM_CHANNEL_WIDE = (1 << 8), 1908 IWM_NVM_CHANNEL_40MHZ = (1 << 9), 1909 IWM_NVM_CHANNEL_80MHZ = (1 << 10), 1910 IWM_NVM_CHANNEL_160MHZ = (1 << 11), 1911 }; 1912 1913 /* 1914 * Translate EEPROM flags to net80211. 1915 */ 1916 static uint32_t 1917 iwm_eeprom_channel_flags(uint16_t ch_flags) 1918 { 1919 uint32_t nflags; 1920 1921 nflags = 0; 1922 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0) 1923 nflags |= IEEE80211_CHAN_PASSIVE; 1924 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0) 1925 nflags |= IEEE80211_CHAN_NOADHOC; 1926 if (ch_flags & IWM_NVM_CHANNEL_RADAR) { 1927 nflags |= IEEE80211_CHAN_DFS; 1928 /* Just in case. 
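Starting an IBSS on a radar channel would also make us responsible for radar detection, which we don't support, so keep NOADHOC set as well.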
*/ 1929 nflags |= IEEE80211_CHAN_NOADHOC; 1930 } 1931 1932 return (nflags); 1933 } 1934 1935 static void 1936 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[], 1937 int maxchans, int *nchans, int ch_idx, size_t ch_num, 1938 const uint8_t bands[]) 1939 { 1940 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags; 1941 uint32_t nflags; 1942 uint16_t ch_flags; 1943 uint8_t ieee; 1944 int error; 1945 1946 for (; ch_idx < ch_num; ch_idx++) { 1947 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx); 1948 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 1949 ieee = iwm_nvm_channels[ch_idx]; 1950 else 1951 ieee = iwm_nvm_channels_8000[ch_idx]; 1952 1953 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) { 1954 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 1955 "Ch. %d Flags %x [%sGHz] - No traffic\n", 1956 ieee, ch_flags, 1957 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 1958 "5.2" : "2.4"); 1959 continue; 1960 } 1961 1962 nflags = iwm_eeprom_channel_flags(ch_flags); 1963 error = ieee80211_add_channel(chans, maxchans, nchans, 1964 ieee, 0, 0, nflags, bands); 1965 if (error != 0) 1966 break; 1967 1968 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 1969 "Ch. %d Flags %x [%sGHz] - Added\n", 1970 ieee, ch_flags, 1971 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 1972 "5.2" : "2.4"); 1973 } 1974 } 1975 1976 static void 1977 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans, 1978 struct ieee80211_channel chans[]) 1979 { 1980 struct iwm_softc *sc = ic->ic_softc; 1981 struct iwm_nvm_data *data = sc->nvm_data; 1982 uint8_t bands[IEEE80211_MODE_BYTES]; 1983 size_t ch_num; 1984 1985 memset(bands, 0, sizeof(bands)); 1986 /* 1-13: 11b/g channels. */ 1987 setbit(bands, IEEE80211_MODE_11B); 1988 setbit(bands, IEEE80211_MODE_11G); 1989 iwm_add_channel_band(sc, chans, maxchans, nchans, 0, 1990 IWM_NUM_2GHZ_CHANNELS - 1, bands); 1991 1992 /* 14: 11b channel only. */ 1993 clrbit(bands, IEEE80211_MODE_11G); 1994 iwm_add_channel_band(sc, chans, maxchans, nchans, 1995 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands); 1996 1997 if (data->sku_cap_band_52GHz_enable) { 1998 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 1999 ch_num = nitems(iwm_nvm_channels); 2000 else 2001 ch_num = nitems(iwm_nvm_channels_8000); 2002 memset(bands, 0, sizeof(bands)); 2003 setbit(bands, IEEE80211_MODE_11A); 2004 iwm_add_channel_band(sc, chans, maxchans, nchans, 2005 IWM_NUM_2GHZ_CHANNELS, ch_num, bands); 2006 } 2007 } 2008 2009 static void 2010 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data, 2011 const uint16_t *mac_override, const uint16_t *nvm_hw) 2012 { 2013 const uint8_t *hw_addr; 2014 2015 if (mac_override) { 2016 static const uint8_t reserved_mac[] = { 2017 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 2018 }; 2019 2020 hw_addr = (const uint8_t *)(mac_override + 2021 IWM_MAC_ADDRESS_OVERRIDE_8000); 2022 2023 /* 2024 * Store the MAC address from MAO section. 2025 * No byte swapping is required in MAO section 2026 */ 2027 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr); 2028 2029 /* 2030 * Force the use of the OTP MAC address in case of reserved MAC 2031 * address in the NVM, or if address is given but invalid. 
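 * Concretely (summarizing the checks below): the override is kept
 * only if it differs from the reserved pattern above, is not the
 * broadcast address, passes iwm_is_valid_ether_addr(), and is not
 * a multicast address; otherwise we fall back to the OTP/WFMP
 * address read further down.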
2032 */ 2033 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) && 2034 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) && 2035 iwm_is_valid_ether_addr(data->hw_addr) && 2036 !IEEE80211_IS_MULTICAST(data->hw_addr)) 2037 return; 2038 2039 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2040 "%s: mac address from nvm override section invalid\n", 2041 __func__); 2042 } 2043 2044 if (nvm_hw) { 2045 /* read the mac address from WFMP registers */ 2046 uint32_t mac_addr0 = 2047 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0)); 2048 uint32_t mac_addr1 = 2049 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1)); 2050 2051 hw_addr = (const uint8_t *)&mac_addr0; 2052 data->hw_addr[0] = hw_addr[3]; 2053 data->hw_addr[1] = hw_addr[2]; 2054 data->hw_addr[2] = hw_addr[1]; 2055 data->hw_addr[3] = hw_addr[0]; 2056 2057 hw_addr = (const uint8_t *)&mac_addr1; 2058 data->hw_addr[4] = hw_addr[1]; 2059 data->hw_addr[5] = hw_addr[0]; 2060 2061 return; 2062 } 2063 2064 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__); 2065 memset(data->hw_addr, 0, sizeof(data->hw_addr)); 2066 } 2067 2068 static int 2069 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2070 const uint16_t *phy_sku) 2071 { 2072 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2073 return le16_to_cpup(nvm_sw + IWM_SKU); 2074 2075 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000)); 2076 } 2077 2078 static int 2079 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2080 { 2081 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2082 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION); 2083 else 2084 return le32_to_cpup((const uint32_t *)(nvm_sw + 2085 IWM_NVM_VERSION_8000)); 2086 } 2087 2088 static int 2089 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2090 const uint16_t *phy_sku) 2091 { 2092 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2093 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG); 2094 2095 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000)); 2096 } 2097 2098 static int 2099 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2100 { 2101 int n_hw_addr; 2102 2103 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2104 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); 2105 2106 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000)); 2107 2108 return n_hw_addr & IWM_N_HW_ADDR_MASK; 2109 } 2110 2111 static void 2112 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data, 2113 uint32_t radio_cfg) 2114 { 2115 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2116 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg); 2117 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg); 2118 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg); 2119 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg); 2120 return; 2121 } 2122 2123 /* set the radio configuration for family 8000 */ 2124 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg); 2125 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg); 2126 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg); 2127 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg); 2128 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg); 2129 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg); 2130 } 2131 2132 static int 2133 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data, 2134 const uint16_t *nvm_hw, const uint16_t *mac_override) 2135 { 2136 #ifdef notyet /* for 
FAMILY 9000 */ 2137 if (cfg->mac_addr_from_csr) { 2138 iwm_set_hw_address_from_csr(sc, data); 2139 } else 2140 #endif 2141 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2142 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR); 2143 2144 /* The byte order is little endian 16 bit, meaning 214365 */ 2145 data->hw_addr[0] = hw_addr[1]; 2146 data->hw_addr[1] = hw_addr[0]; 2147 data->hw_addr[2] = hw_addr[3]; 2148 data->hw_addr[3] = hw_addr[2]; 2149 data->hw_addr[4] = hw_addr[5]; 2150 data->hw_addr[5] = hw_addr[4]; 2151 } else { 2152 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw); 2153 } 2154 2155 if (!iwm_is_valid_ether_addr(data->hw_addr)) { 2156 device_printf(sc->sc_dev, "no valid mac address was found\n"); 2157 return EINVAL; 2158 } 2159 2160 return 0; 2161 } 2162 2163 static struct iwm_nvm_data * 2164 iwm_parse_nvm_data(struct iwm_softc *sc, 2165 const uint16_t *nvm_hw, const uint16_t *nvm_sw, 2166 const uint16_t *nvm_calib, const uint16_t *mac_override, 2167 const uint16_t *phy_sku, const uint16_t *regulatory) 2168 { 2169 struct iwm_nvm_data *data; 2170 uint32_t sku, radio_cfg; 2171 uint16_t lar_config; 2172 2173 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2174 data = malloc(sizeof(*data) + 2175 IWM_NUM_CHANNELS * sizeof(uint16_t), 2176 M_DEVBUF, M_NOWAIT | M_ZERO); 2177 } else { 2178 data = malloc(sizeof(*data) + 2179 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t), 2180 M_DEVBUF, M_NOWAIT | M_ZERO); 2181 } 2182 if (!data) 2183 return NULL; 2184 2185 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw); 2186 2187 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku); 2188 iwm_set_radio_cfg(sc, data, radio_cfg); 2189 2190 sku = iwm_get_sku(sc, nvm_sw, phy_sku); 2191 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ; 2192 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ; 2193 data->sku_cap_11n_enable = 0; 2194 2195 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw); 2196 2197 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2198 uint16_t lar_offset = data->nvm_version < 0xE39 ? 2199 IWM_NVM_LAR_OFFSET_8000_OLD : 2200 IWM_NVM_LAR_OFFSET_8000; 2201 2202 lar_config = le16_to_cpup(regulatory + lar_offset); 2203 data->lar_enabled = !!(lar_config & 2204 IWM_NVM_LAR_ENABLED_8000); 2205 } 2206 2207 /* If no valid mac address was found - bail out */ 2208 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) { 2209 free(data, M_DEVBUF); 2210 return NULL; 2211 } 2212 2213 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2214 memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ? 
2215 &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS], 2216 IWM_NUM_CHANNELS * sizeof(uint16_t)); 2217 } else { 2218 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000], 2219 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t)); 2220 } 2221 2222 return data; 2223 } 2224 2225 static void 2226 iwm_free_nvm_data(struct iwm_nvm_data *data) 2227 { 2228 if (data != NULL) 2229 free(data, M_DEVBUF); 2230 } 2231 2232 static struct iwm_nvm_data * 2233 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) 2234 { 2235 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; 2236 2237 /* Checking for required sections */ 2238 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2239 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2240 !sections[sc->cfg->nvm_hw_section_num].data) { 2241 device_printf(sc->sc_dev, 2242 "Can't parse empty OTP/NVM sections\n"); 2243 return NULL; 2244 } 2245 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2246 /* SW and REGULATORY sections are mandatory */ 2247 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2248 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) { 2249 device_printf(sc->sc_dev, 2250 "Can't parse empty OTP/NVM sections\n"); 2251 return NULL; 2252 } 2253 /* MAC_OVERRIDE or at least HW section must exist */ 2254 if (!sections[sc->cfg->nvm_hw_section_num].data && 2255 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) { 2256 device_printf(sc->sc_dev, 2257 "Can't parse mac_address, empty sections\n"); 2258 return NULL; 2259 } 2260 2261 /* PHY_SKU section is mandatory in B0 */ 2262 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) { 2263 device_printf(sc->sc_dev, 2264 "Can't parse phy_sku in B0, empty sections\n"); 2265 return NULL; 2266 } 2267 } else { 2268 panic("unknown device family %d\n", sc->cfg->device_family); 2269 } 2270 2271 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data; 2272 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data; 2273 calib = (const uint16_t *) 2274 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data; 2275 regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2276 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data : 2277 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data; 2278 mac_override = (const uint16_t *) 2279 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data; 2280 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data; 2281 2282 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override, 2283 phy_sku, regulatory); 2284 } 2285 2286 static int 2287 iwm_nvm_init(struct iwm_softc *sc) 2288 { 2289 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS]; 2290 int i, ret, section; 2291 uint32_t size_read = 0; 2292 uint8_t *nvm_buffer, *temp; 2293 uint16_t len; 2294 2295 memset(nvm_sections, 0, sizeof(nvm_sections)); 2296 2297 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS) 2298 return EINVAL; 2299 2300 /* load NVM values from nic */ 2301 /* Read From FW NVM */ 2302 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n"); 2303 2304 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO); 2305 if (!nvm_buffer) 2306 return ENOMEM; 2307 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) { 2308 /* we override the constness for initial read */ 2309 ret = iwm_nvm_read_section(sc, section, nvm_buffer, 2310 &len, size_read); 2311 if (ret) 2312 continue; 2313 size_read += len; 2314 temp = malloc(len, M_DEVBUF, M_NOWAIT); 2315 if (!temp) { 2316 ret = ENOMEM; 2317 break; 2318 } 2319 memcpy(temp, nvm_buffer, len); 2320 2321 nvm_sections[section].data = temp; 2322 nvm_sections[section].length = len; 2323 } 2324 if (!size_read) 2325 device_printf(sc->sc_dev, "OTP is blank\n"); 2326 free(nvm_buffer, M_DEVBUF); 2327 2328 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections); 2329 if (!sc->nvm_data) 2330 return EINVAL; 2331 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 2332 "nvm version = %x\n", sc->nvm_data->nvm_version); 2333 2334 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) { 2335 if (nvm_sections[i].data != NULL) 2336 free(nvm_sections[i].data, M_DEVBUF); 2337 } 2338 2339 return 0; 2340 } 2341 2342 static int 2343 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num, 2344 const struct iwm_fw_desc *section) 2345 { 2346 struct iwm_dma_info *dma = &sc->fw_dma; 2347 uint8_t *v_addr; 2348 bus_addr_t p_addr; 2349 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len); 2350 int ret = 0; 2351 2352 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2353 "%s: [%d] uCode section being loaded...\n", 2354 __func__, section_num); 2355 2356 v_addr = dma->vaddr; 2357 p_addr = dma->paddr; 2358 2359 for (offset = 0; offset < section->len; offset += chunk_sz) { 2360 uint32_t copy_size, dst_addr; 2361 int extended_addr = FALSE; 2362 2363 copy_size = MIN(chunk_sz, section->len - offset); 2364 dst_addr = section->offset + offset; 2365 2366 if (dst_addr >= IWM_FW_MEM_EXTENDED_START && 2367 dst_addr <= IWM_FW_MEM_EXTENDED_END) 2368 extended_addr = TRUE; 2369 2370 if (extended_addr) 2371 iwm_set_bits_prph(sc, IWM_LMPM_CHICK, 2372 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2373 2374 memcpy(v_addr, (const uint8_t *)section->data + offset, 2375 copy_size); 2376 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 2377 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr, 2378 copy_size); 2379 2380 if (extended_addr) 2381 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK, 2382 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2383 2384 if (ret) { 2385 device_printf(sc->sc_dev, 2386 "%s: Could not load the [%d] uCode section\n", 2387 __func__, section_num); 2388 break; 2389 } 2390 } 2391 2392 return 
ret; 2393 } 2394 2395 /* 2396 * ucode 2397 */ 2398 static int 2399 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr, 2400 bus_addr_t phy_addr, uint32_t byte_cnt) 2401 { 2402 sc->sc_fw_chunk_done = 0; 2403 2404 if (!iwm_nic_lock(sc)) 2405 return EBUSY; 2406 2407 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2408 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 2409 2410 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL), 2411 dst_addr); 2412 2413 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL), 2414 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 2415 2416 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL), 2417 (iwm_get_dma_hi_addr(phy_addr) 2418 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 2419 2420 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL), 2421 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 2422 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 2423 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 2424 2425 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2426 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 2427 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 2428 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 2429 2430 iwm_nic_unlock(sc); 2431 2432 /* wait up to 5s for this segment to load */ 2433 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5); 2434 2435 if (!sc->sc_fw_chunk_done) { 2436 device_printf(sc->sc_dev, 2437 "fw chunk addr 0x%x len %d failed to load\n", 2438 dst_addr, byte_cnt); 2439 return ETIMEDOUT; 2440 } 2441 2442 return 0; 2443 } 2444 2445 static int 2446 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc, 2447 const struct iwm_fw_img *image, int cpu, int *first_ucode_section) 2448 { 2449 int shift_param; 2450 int i, ret = 0, sec_num = 0x1; 2451 uint32_t val, last_read_idx = 0; 2452 2453 if (cpu == 1) { 2454 shift_param = 0; 2455 *first_ucode_section = 0; 2456 } else { 2457 shift_param = 16; 2458 (*first_ucode_section)++; 2459 } 2460 2461 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2462 last_read_idx = i; 2463 2464 /* 2465 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2466 * CPU1 to CPU2. 2467 * PAGING_SEPARATOR_SECTION delimiter - separate between 2468 * CPU2 non paged to CPU2 paging sec. 
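 * The section list is therefore laid out roughly as follows
 * (inferred from this loop, not from firmware documentation):
 *   [CPU1 secs] SEPARATOR [CPU2 secs] SEPARATOR [CPU2 paging secs]
 * A separator carries a magic value in its offset field instead of
 * a real load address, so hitting one ends the current CPU's loop.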
2469 */ 2470 if (!image->sec[i].data || 2471 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2472 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2473 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2474 "Break since Data not valid or Empty section, sec = %d\n", 2475 i); 2476 break; 2477 } 2478 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2479 if (ret) 2480 return ret; 2481 2482 /* Notify the ucode of the loaded section number and status */ 2483 if (iwm_nic_lock(sc)) { 2484 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS); 2485 val = val | (sec_num << shift_param); 2486 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val); 2487 sec_num = (sec_num << 1) | 0x1; 2488 iwm_nic_unlock(sc); 2489 } 2490 } 2491 2492 *first_ucode_section = last_read_idx; 2493 2494 iwm_enable_interrupts(sc); 2495 2496 if (iwm_nic_lock(sc)) { 2497 if (cpu == 1) 2498 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF); 2499 else 2500 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); 2501 iwm_nic_unlock(sc); 2502 } 2503 2504 return 0; 2505 } 2506 2507 static int 2508 iwm_pcie_load_cpu_sections(struct iwm_softc *sc, 2509 const struct iwm_fw_img *image, int cpu, int *first_ucode_section) 2510 { 2511 int shift_param; 2512 int i, ret = 0; 2513 uint32_t last_read_idx = 0; 2514 2515 if (cpu == 1) { 2516 shift_param = 0; 2517 *first_ucode_section = 0; 2518 } else { 2519 shift_param = 16; 2520 (*first_ucode_section)++; 2521 } 2522 2523 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2524 last_read_idx = i; 2525 2526 /* 2527 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2528 * CPU1 to CPU2. 2529 * PAGING_SEPARATOR_SECTION delimiter - separate between 2530 * CPU2 non paged to CPU2 paging sec. 2531 */ 2532 if (!image->sec[i].data || 2533 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2534 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2535 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2536 "Break since Data not valid or Empty section, sec = %d\n", 2537 i); 2538 break; 2539 } 2540 2541 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2542 if (ret) 2543 return ret; 2544 } 2545 2546 *first_ucode_section = last_read_idx; 2547 2548 return 0; 2549 2550 } 2551 2552 static int 2553 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image) 2554 { 2555 int ret = 0; 2556 int first_ucode_section; 2557 2558 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2559 image->is_dual_cpus ? "Dual" : "Single"); 2560 2561 /* load to FW the binary non secured sections of CPU1 */ 2562 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section); 2563 if (ret) 2564 return ret; 2565 2566 if (image->is_dual_cpus) { 2567 /* set CPU2 header address */ 2568 if (iwm_nic_lock(sc)) { 2569 iwm_write_prph(sc, 2570 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, 2571 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE); 2572 iwm_nic_unlock(sc); 2573 } 2574 2575 /* load to FW the binary sections of CPU2 */ 2576 ret = iwm_pcie_load_cpu_sections(sc, image, 2, 2577 &first_ucode_section); 2578 if (ret) 2579 return ret; 2580 } 2581 2582 iwm_enable_interrupts(sc); 2583 2584 /* release CPU reset */ 2585 IWM_WRITE(sc, IWM_CSR_RESET, 0); 2586 2587 return 0; 2588 } 2589 2590 int 2591 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc, 2592 const struct iwm_fw_img *image) 2593 { 2594 int ret = 0; 2595 int first_ucode_section; 2596 2597 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2598 image->is_dual_cpus ? 
"Dual" : "Single"); 2599 2600 /* configure the ucode to be ready to get the secured image */ 2601 /* release CPU reset */ 2602 if (iwm_nic_lock(sc)) { 2603 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, 2604 IWM_RELEASE_CPU_RESET_BIT); 2605 iwm_nic_unlock(sc); 2606 } 2607 2608 /* load to FW the binary Secured sections of CPU1 */ 2609 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1, 2610 &first_ucode_section); 2611 if (ret) 2612 return ret; 2613 2614 /* load to FW the binary sections of CPU2 */ 2615 return iwm_pcie_load_cpu_sections_8000(sc, image, 2, 2616 &first_ucode_section); 2617 } 2618 2619 /* XXX Get rid of this definition */ 2620 static inline void 2621 iwm_enable_fw_load_int(struct iwm_softc *sc) 2622 { 2623 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n"); 2624 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX; 2625 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 2626 } 2627 2628 /* XXX Add proper rfkill support code */ 2629 static int 2630 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw) 2631 { 2632 int ret; 2633 2634 /* This may fail if AMT took ownership of the device */ 2635 if (iwm_prepare_card_hw(sc)) { 2636 device_printf(sc->sc_dev, 2637 "%s: Exit HW not ready\n", __func__); 2638 ret = EIO; 2639 goto out; 2640 } 2641 2642 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2643 2644 iwm_disable_interrupts(sc); 2645 2646 /* make sure rfkill handshake bits are cleared */ 2647 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2648 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, 2649 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2650 2651 /* clear (again), then enable host interrupts */ 2652 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2653 2654 ret = iwm_nic_init(sc); 2655 if (ret) { 2656 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__); 2657 goto out; 2658 } 2659 2660 /* 2661 * Now, we load the firmware and don't want to be interrupted, even 2662 * by the RF-Kill interrupt (hence mask all the interrupt besides the 2663 * FH_TX interrupt which is needed to load the firmware). If the 2664 * RF-Kill switch is toggled, we will find out after having loaded 2665 * the firmware and return the proper value to the caller. 2666 */ 2667 iwm_enable_fw_load_int(sc); 2668 2669 /* really make sure rfkill handshake bits are cleared */ 2670 /* maybe we should write a few times more? 
just to make sure */ 2671 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2672 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2673 2674 /* Load the given image to the HW */ 2675 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) 2676 ret = iwm_pcie_load_given_ucode_8000(sc, fw); 2677 else 2678 ret = iwm_pcie_load_given_ucode(sc, fw); 2679 2680 /* XXX re-check RF-Kill state */ 2681 2682 out: 2683 return ret; 2684 } 2685 2686 static int 2687 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant) 2688 { 2689 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = { 2690 .valid = htole32(valid_tx_ant), 2691 }; 2692 2693 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 2694 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd); 2695 } 2696 2697 /* iwlwifi: mvm/fw.c */ 2698 static int 2699 iwm_send_phy_cfg_cmd(struct iwm_softc *sc) 2700 { 2701 struct iwm_phy_cfg_cmd phy_cfg_cmd; 2702 enum iwm_ucode_type ucode_type = sc->cur_ucode; 2703 2704 /* Set parameters */ 2705 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc)); 2706 phy_cfg_cmd.calib_control.event_trigger = 2707 sc->sc_default_calib[ucode_type].event_trigger; 2708 phy_cfg_cmd.calib_control.flow_trigger = 2709 sc->sc_default_calib[ucode_type].flow_trigger; 2710 2711 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET, 2712 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg); 2713 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC, 2714 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 2715 } 2716 2717 static int 2718 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data) 2719 { 2720 struct iwm_mvm_alive_data *alive_data = data; 2721 struct iwm_mvm_alive_resp_v3 *palive3; 2722 struct iwm_mvm_alive_resp *palive; 2723 struct iwm_umac_alive *umac; 2724 struct iwm_lmac_alive *lmac1; 2725 struct iwm_lmac_alive *lmac2 = NULL; 2726 uint16_t status; 2727 2728 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) { 2729 palive = (void *)pkt->data; 2730 umac = &palive->umac_data; 2731 lmac1 = &palive->lmac_data[0]; 2732 lmac2 = &palive->lmac_data[1]; 2733 status = le16toh(palive->status); 2734 } else { 2735 palive3 = (void *)pkt->data; 2736 umac = &palive3->umac_data; 2737 lmac1 = &palive3->lmac_data; 2738 status = le16toh(palive3->status); 2739 } 2740 2741 sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr); 2742 if (lmac2) 2743 sc->error_event_table[1] = 2744 le32toh(lmac2->error_event_table_ptr); 2745 sc->log_event_table = le32toh(lmac1->log_event_table_ptr); 2746 sc->umac_error_event_table = le32toh(umac->error_info_addr); 2747 alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr); 2748 alive_data->valid = status == IWM_ALIVE_STATUS_OK; 2749 if (sc->umac_error_event_table) 2750 sc->support_umac_log = TRUE; 2751 2752 IWM_DPRINTF(sc, IWM_DEBUG_FW, 2753 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", 2754 status, lmac1->ver_type, lmac1->ver_subtype); 2755 2756 if (lmac2) 2757 IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n"); 2758 2759 IWM_DPRINTF(sc, IWM_DEBUG_FW, 2760 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 2761 le32toh(umac->umac_major), 2762 le32toh(umac->umac_minor)); 2763 2764 return TRUE; 2765 } 2766 2767 static int 2768 iwm_wait_phy_db_entry(struct iwm_softc *sc, 2769 struct iwm_rx_packet *pkt, void *data) 2770 { 2771 struct iwm_phy_db *phy_db = data; 2772 2773 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) { 2774 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) { 2775 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n", 2776 
__func__, pkt->hdr.code); 2777 } 2778 return TRUE; 2779 } 2780 2781 if (iwm_phy_db_set_section(phy_db, pkt)) { 2782 device_printf(sc->sc_dev, 2783 "%s: iwm_phy_db_set_section failed\n", __func__); 2784 } 2785 2786 return FALSE; 2787 } 2788 2789 static int 2790 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc, 2791 enum iwm_ucode_type ucode_type) 2792 { 2793 struct iwm_notification_wait alive_wait; 2794 struct iwm_mvm_alive_data alive_data; 2795 const struct iwm_fw_img *fw; 2796 enum iwm_ucode_type old_type = sc->cur_ucode; 2797 int error; 2798 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE }; 2799 2800 fw = &sc->sc_fw.img[ucode_type]; 2801 sc->cur_ucode = ucode_type; 2802 sc->ucode_loaded = FALSE; 2803 2804 memset(&alive_data, 0, sizeof(alive_data)); 2805 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait, 2806 alive_cmd, nitems(alive_cmd), 2807 iwm_alive_fn, &alive_data); 2808 2809 error = iwm_start_fw(sc, fw); 2810 if (error) { 2811 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error); 2812 sc->cur_ucode = old_type; 2813 iwm_remove_notification(sc->sc_notif_wait, &alive_wait); 2814 return error; 2815 } 2816 2817 /* 2818 * Some things may run in the background now, but we 2819 * just wait for the ALIVE notification here. 2820 */ 2821 IWM_UNLOCK(sc); 2822 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait, 2823 IWM_MVM_UCODE_ALIVE_TIMEOUT); 2824 IWM_LOCK(sc); 2825 if (error) { 2826 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2827 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a; 2828 if (iwm_nic_lock(sc)) { 2829 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS); 2830 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS); 2831 iwm_nic_unlock(sc); 2832 } 2833 device_printf(sc->sc_dev, 2834 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 2835 a, b); 2836 } 2837 sc->cur_ucode = old_type; 2838 return error; 2839 } 2840 2841 if (!alive_data.valid) { 2842 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n", 2843 __func__); 2844 sc->cur_ucode = old_type; 2845 return EIO; 2846 } 2847 2848 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr); 2849 2850 /* 2851 * configure and operate fw paging mechanism. 2852 * driver configures the paging flow only once, CPU2 paging image 2853 * included in the IWM_UCODE_INIT image. 
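 * The two calls below implement that flow: iwm_save_fw_paging()
 * copies the CPU2 paging sections into driver-owned DRAM blocks,
 * and iwm_send_paging_cmd() hands the addresses of those blocks to
 * the firmware (a summary based on the call names and their error
 * paths below).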
2854 */ 2855 if (fw->paging_mem_size) { 2856 error = iwm_save_fw_paging(sc, fw); 2857 if (error) { 2858 device_printf(sc->sc_dev, 2859 "%s: failed to save the FW paging image\n", 2860 __func__); 2861 return error; 2862 } 2863 2864 error = iwm_send_paging_cmd(sc, fw); 2865 if (error) { 2866 device_printf(sc->sc_dev, 2867 "%s: failed to send the paging cmd\n", __func__); 2868 iwm_free_fw_paging(sc); 2869 return error; 2870 } 2871 } 2872 2873 if (!error) 2874 sc->ucode_loaded = TRUE; 2875 return error; 2876 } 2877 2878 /* 2879 * mvm misc bits 2880 */ 2881 2882 /* 2883 * follows iwlwifi/fw.c 2884 */ 2885 static int 2886 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm) 2887 { 2888 struct iwm_notification_wait calib_wait; 2889 static const uint16_t init_complete[] = { 2890 IWM_INIT_COMPLETE_NOTIF, 2891 IWM_CALIB_RES_NOTIF_PHY_DB 2892 }; 2893 int ret; 2894 2895 /* do not operate with rfkill switch turned on */ 2896 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) { 2897 device_printf(sc->sc_dev, 2898 "radio is disabled by hardware switch\n"); 2899 return EPERM; 2900 } 2901 2902 iwm_init_notification_wait(sc->sc_notif_wait, 2903 &calib_wait, 2904 init_complete, 2905 nitems(init_complete), 2906 iwm_wait_phy_db_entry, 2907 sc->sc_phy_db); 2908 2909 /* Will also start the device */ 2910 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT); 2911 if (ret) { 2912 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n", 2913 ret); 2914 goto error; 2915 } 2916 2917 if (justnvm) { 2918 /* Read nvm */ 2919 ret = iwm_nvm_init(sc); 2920 if (ret) { 2921 device_printf(sc->sc_dev, "failed to read nvm\n"); 2922 goto error; 2923 } 2924 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr); 2925 goto error; 2926 } 2927 2928 ret = iwm_send_bt_init_conf(sc); 2929 if (ret) { 2930 device_printf(sc->sc_dev, 2931 "failed to send bt coex configuration: %d\n", ret); 2932 goto error; 2933 } 2934 2935 /* Send TX valid antennas before triggering calibrations */ 2936 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc)); 2937 if (ret) { 2938 device_printf(sc->sc_dev, 2939 "failed to send antennas before calibration: %d\n", ret); 2940 goto error; 2941 } 2942 2943 /* 2944 * Send phy configurations command to init uCode 2945 * to start the 16.0 uCode init image internal calibrations. 2946 */ 2947 ret = iwm_send_phy_cfg_cmd(sc); 2948 if (ret) { 2949 device_printf(sc->sc_dev, 2950 "%s: Failed to run INIT calibrations: %d\n", 2951 __func__, ret); 2952 goto error; 2953 } 2954 2955 /* 2956 * Nothing to do but wait for the init complete notification 2957 * from the firmware. 
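 * Note the IWM_UNLOCK/IWM_LOCK pair around the wait below: the
 * softc lock is presumably dropped so the interrupt path can take
 * it and dispatch iwm_wait_phy_db_entry() for each notification
 * while this thread sleeps.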
2958 */ 2959 IWM_UNLOCK(sc); 2960 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait, 2961 IWM_MVM_UCODE_CALIB_TIMEOUT); 2962 IWM_LOCK(sc); 2963 2964 2965 goto out; 2966 2967 error: 2968 iwm_remove_notification(sc->sc_notif_wait, &calib_wait); 2969 out: 2970 return ret; 2971 } 2972 2973 static int 2974 iwm_mvm_config_ltr(struct iwm_softc *sc) 2975 { 2976 struct iwm_ltr_config_cmd cmd = { 2977 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE), 2978 }; 2979 2980 if (!sc->sc_ltr_enabled) 2981 return 0; 2982 2983 return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd); 2984 } 2985 2986 /* 2987 * receive side 2988 */ 2989 2990 /* (re)stock rx ring, called at init-time and at runtime */ 2991 static int 2992 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx) 2993 { 2994 struct iwm_rx_ring *ring = &sc->rxq; 2995 struct iwm_rx_data *data = &ring->data[idx]; 2996 struct mbuf *m; 2997 bus_dmamap_t dmamap; 2998 bus_dma_segment_t seg; 2999 int nsegs, error; 3000 3001 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE); 3002 if (m == NULL) 3003 return ENOBUFS; 3004 3005 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3006 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m, 3007 &seg, &nsegs, BUS_DMA_NOWAIT); 3008 if (error != 0) { 3009 device_printf(sc->sc_dev, 3010 "%s: can't map mbuf, error %d\n", __func__, error); 3011 m_freem(m); 3012 return error; 3013 } 3014 3015 if (data->m != NULL) 3016 bus_dmamap_unload(ring->data_dmat, data->map); 3017 3018 /* Swap ring->spare_map with data->map */ 3019 dmamap = data->map; 3020 data->map = ring->spare_map; 3021 ring->spare_map = dmamap; 3022 3023 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 3024 data->m = m; 3025 3026 /* Update RX descriptor. */ 3027 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned")); 3028 ring->desc[idx] = htole32(seg.ds_addr >> 8); 3029 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3030 BUS_DMASYNC_PREWRITE); 3031 3032 return 0; 3033 } 3034 3035 /* iwlwifi: mvm/rx.c */ 3036 /* 3037 * iwm_mvm_get_signal_strength - use the new rx PHY INFO API. 3038 * Energy values are reported by the fw as positive values and need to be 3039 * negated to obtain dBm. Account for missing antennas by replacing 0 3040 * values with -256 dBm: practically zero power and an infeasible 8-bit value. 3041 */ 3042 static int 3043 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info) 3044 { 3045 int energy_a, energy_b, energy_c, max_energy; 3046 uint32_t val; 3047 3048 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]); 3049 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >> 3050 IWM_RX_INFO_ENERGY_ANT_A_POS; 3051 energy_a = energy_a ? -energy_a : -256; 3052 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >> 3053 IWM_RX_INFO_ENERGY_ANT_B_POS; 3054 energy_b = energy_b ? -energy_b : -256; 3055 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >> 3056 IWM_RX_INFO_ENERGY_ANT_C_POS; 3057 energy_c = energy_c ?
-energy_c : -256; 3058 max_energy = MAX(energy_a, energy_b); 3059 max_energy = MAX(max_energy, energy_c); 3060 3061 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3062 "energy In A %d B %d C %d , and max %d\n", 3063 energy_a, energy_b, energy_c, max_energy); 3064 3065 return max_energy; 3066 } 3067 3068 static void 3069 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3070 { 3071 struct iwm_rx_phy_info *phy_info = (void *)pkt->data; 3072 3073 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n"); 3074 3075 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); 3076 } 3077 3078 /* 3079 * Retrieve the average noise (in dBm) among receivers. 3080 */ 3081 static int 3082 iwm_get_noise(struct iwm_softc *sc, 3083 const struct iwm_mvm_statistics_rx_non_phy *stats) 3084 { 3085 int i, total, nbant, noise; 3086 3087 total = nbant = noise = 0; 3088 for (i = 0; i < 3; i++) { 3089 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff; 3090 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n", 3091 __func__, 3092 i, 3093 noise); 3094 3095 if (noise) { 3096 total += noise; 3097 nbant++; 3098 } 3099 } 3100 3101 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n", 3102 __func__, nbant, total); 3103 #if 0 3104 /* There should be at least one antenna but check anyway. */ 3105 return (nbant == 0) ? -127 : (total / nbant) - 107; 3106 #else 3107 /* For now, just hard-code it to -96 to be safe */ 3108 return (-96); 3109 #endif 3110 } 3111 3112 static void 3113 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3114 { 3115 struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data; 3116 3117 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats)); 3118 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general); 3119 } 3120 3121 /* 3122 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler 3123 * 3124 * Handles the actual data of the Rx packet from the fw 3125 */ 3126 static boolean_t 3127 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, 3128 boolean_t stolen) 3129 { 3130 struct ieee80211com *ic = &sc->sc_ic; 3131 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3132 struct ieee80211_frame *wh; 3133 struct ieee80211_node *ni; 3134 struct ieee80211_rx_stats rxs; 3135 struct iwm_rx_phy_info *phy_info; 3136 struct iwm_rx_mpdu_res_start *rx_res; 3137 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset); 3138 uint32_t len; 3139 uint32_t rx_pkt_status; 3140 int rssi; 3141 3142 phy_info = &sc->sc_last_phy_info; 3143 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data; 3144 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res)); 3145 len = le16toh(rx_res->byte_count); 3146 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len)); 3147 3148 if (__predict_false(phy_info->cfg_phy_cnt > 20)) { 3149 device_printf(sc->sc_dev, 3150 "dsp size out of range [0,20]: %d\n", 3151 phy_info->cfg_phy_cnt); 3152 goto fail; 3153 } 3154 3155 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) || 3156 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) { 3157 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3158 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); 3159 goto fail; 3160 } 3161 3162 rssi = iwm_mvm_get_signal_strength(sc, phy_info); 3163 3164 /* Map it to relative value */ 3165 rssi = rssi - sc->sc_noise; 3166 3167 /* replenish ring for the buffer we're going to feed to the sharks */ 3168 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) { 3169 device_printf(sc->sc_dev, "%s: unable to add more buffers\n", 3170 
__func__); 3171 goto fail; 3172 } 3173 3174 m->m_data = pkt->data + sizeof(*rx_res); 3175 m->m_pkthdr.len = m->m_len = len; 3176 3177 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3178 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise); 3179 3180 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 3181 3182 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3183 "%s: phy_info: channel=%d, flags=0x%08x\n", 3184 __func__, 3185 le16toh(phy_info->channel), 3186 le16toh(phy_info->phy_flags)); 3187 3188 /* 3189 * Populate an RX state struct with the provided information. 3190 */ 3191 bzero(&rxs, sizeof(rxs)); 3192 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 3193 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 3194 rxs.c_ieee = le16toh(phy_info->channel); 3195 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) { 3196 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ); 3197 } else { 3198 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ); 3199 } 3200 3201 /* rssi is in 1/2db units */ 3202 rxs.c_rssi = rssi * 2; 3203 rxs.c_nf = sc->sc_noise; 3204 if (ieee80211_add_rx_params(m, &rxs) == 0) { 3205 if (ni) 3206 ieee80211_free_node(ni); 3207 goto fail; 3208 } 3209 3210 if (ieee80211_radiotap_active_vap(vap)) { 3211 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; 3212 3213 tap->wr_flags = 0; 3214 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE)) 3215 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3216 tap->wr_chan_freq = htole16(rxs.c_freq); 3217 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */ 3218 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); 3219 tap->wr_dbm_antsignal = (int8_t)rssi; 3220 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 3221 tap->wr_tsft = phy_info->system_timestamp; 3222 switch (phy_info->rate) { 3223 /* CCK rates. */ 3224 case 10: tap->wr_rate = 2; break; 3225 case 20: tap->wr_rate = 4; break; 3226 case 55: tap->wr_rate = 11; break; 3227 case 110: tap->wr_rate = 22; break; 3228 /* OFDM rates. */ 3229 case 0xd: tap->wr_rate = 12; break; 3230 case 0xf: tap->wr_rate = 18; break; 3231 case 0x5: tap->wr_rate = 24; break; 3232 case 0x7: tap->wr_rate = 36; break; 3233 case 0x9: tap->wr_rate = 48; break; 3234 case 0xb: tap->wr_rate = 72; break; 3235 case 0x1: tap->wr_rate = 96; break; 3236 case 0x3: tap->wr_rate = 108; break; 3237 /* Unknown rate: should not happen. */ 3238 default: tap->wr_rate = 0; 3239 } 3240 } 3241 3242 IWM_UNLOCK(sc); 3243 if (ni != NULL) { 3244 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m); 3245 ieee80211_input_mimo(ni, m); 3246 ieee80211_free_node(ni); 3247 } else { 3248 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m); 3249 ieee80211_input_mimo_all(ic, m); 3250 } 3251 IWM_LOCK(sc); 3252 3253 return TRUE; 3254 3255 fail: 3256 counter_u64_add(ic->ic_ierrors, 1); 3257 return FALSE; 3258 } 3259 3260 static int 3261 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt, 3262 struct iwm_node *in) 3263 { 3264 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data; 3265 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; 3266 struct ieee80211_node *ni = &in->in_ni; 3267 struct ieee80211vap *vap = ni->ni_vap; 3268 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK; 3269 int new_rate, cur_rate = vap->iv_bss->ni_txrate; 3270 boolean_t rate_matched; 3271 uint8_t tx_resp_rate; 3272 3273 KASSERT(tx_resp->frame_count == 1, ("too many frames")); 3274 3275 /* Update rate control statistics. 
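 * Only frames that went out at the node's current ni_txrate are fed
 * back into net80211 rate control (rate_matched below); firmware
 * failure codes are first mapped onto the closest
 * IEEE80211_RATECTL_TX_FAIL_* reason.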
*/ 3276 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n", 3277 __func__, 3278 (int) le16toh(tx_resp->status.status), 3279 (int) le16toh(tx_resp->status.sequence), 3280 tx_resp->frame_count, 3281 tx_resp->bt_kill_count, 3282 tx_resp->failure_rts, 3283 tx_resp->failure_frame, 3284 le32toh(tx_resp->initial_rate), 3285 (int) le16toh(tx_resp->wireless_media_time)); 3286 3287 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate)); 3288 3289 /* For rate control, ignore frames sent at different initial rate */ 3290 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate); 3291 3292 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) { 3293 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3294 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u " 3295 "ni_txrate=%d)\n", tx_resp_rate, cur_rate); 3296 } 3297 3298 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY | 3299 IEEE80211_RATECTL_STATUS_LONG_RETRY; 3300 txs->short_retries = tx_resp->failure_rts; 3301 txs->long_retries = tx_resp->failure_frame; 3302 if (status != IWM_TX_STATUS_SUCCESS && 3303 status != IWM_TX_STATUS_DIRECT_DONE) { 3304 switch (status) { 3305 case IWM_TX_STATUS_FAIL_SHORT_LIMIT: 3306 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT; 3307 break; 3308 case IWM_TX_STATUS_FAIL_LONG_LIMIT: 3309 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; 3310 break; 3311 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE: 3312 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED; 3313 break; 3314 default: 3315 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; 3316 break; 3317 } 3318 } else { 3319 txs->status = IEEE80211_RATECTL_TX_SUCCESS; 3320 } 3321 3322 if (rate_matched) { 3323 ieee80211_ratectl_tx_complete(ni, txs); 3324 3325 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0); 3326 new_rate = vap->iv_bss->ni_txrate; 3327 if (new_rate != 0 && new_rate != cur_rate) { 3328 struct iwm_node *in = IWM_NODE(vap->iv_bss); 3329 iwm_setrates(sc, in, rix); 3330 iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE); 3331 } 3332 } 3333 3334 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS); 3335 } 3336 3337 static void 3338 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3339 { 3340 struct iwm_cmd_header *cmd_hdr = &pkt->hdr; 3341 int idx = cmd_hdr->idx; 3342 int qid = cmd_hdr->qid; 3343 struct iwm_tx_ring *ring = &sc->txq[qid]; 3344 struct iwm_tx_data *txd = &ring->data[idx]; 3345 struct iwm_node *in = txd->in; 3346 struct mbuf *m = txd->m; 3347 int status; 3348 3349 KASSERT(txd->done == 0, ("txd not done")); 3350 KASSERT(txd->in != NULL, ("txd without node")); 3351 KASSERT(txd->m != NULL, ("txd without mbuf")); 3352 3353 sc->sc_tx_timer = 0; 3354 3355 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in); 3356 3357 /* Unmap and free mbuf. */ 3358 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE); 3359 bus_dmamap_unload(ring->data_dmat, txd->map); 3360 3361 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3362 "free txd %p, in %p\n", txd, txd->in); 3363 txd->done = 1; 3364 txd->m = NULL; 3365 txd->in = NULL; 3366 3367 ieee80211_tx_complete(&in->in_ni, m, status); 3368 3369 if (--ring->queued < IWM_TX_RING_LOMARK) { 3370 sc->qfullmsk &= ~(1 << ring->qid); 3371 if (sc->qfullmsk == 0) { 3372 iwm_start(sc); 3373 } 3374 } 3375 } 3376 3377 /* 3378 * transmit side 3379 */ 3380 3381 /* 3382 * Process a "command done" firmware notification. This is where we wakeup 3383 * processes waiting for a synchronous command completion. 
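 * The wait channel is the ring descriptor itself, so the wakeup()
 * on &ring->desc[idx] below is presumably matched by an msleep()
 * on the same address in the synchronous command send path.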
3384 * from if_iwn 3385 */ 3386 static void 3387 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3388 { 3389 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE]; 3390 struct iwm_tx_data *data; 3391 3392 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) { 3393 return; /* Not a command ack. */ 3394 } 3395 3396 /* XXX wide commands? */ 3397 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 3398 "cmd notification type 0x%x qid %d idx %d\n", 3399 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx); 3400 3401 data = &ring->data[pkt->hdr.idx]; 3402 3403 /* If the command was mapped in an mbuf, free it. */ 3404 if (data->m != NULL) { 3405 bus_dmamap_sync(ring->data_dmat, data->map, 3406 BUS_DMASYNC_POSTWRITE); 3407 bus_dmamap_unload(ring->data_dmat, data->map); 3408 m_freem(data->m); 3409 data->m = NULL; 3410 } 3411 wakeup(&ring->desc[pkt->hdr.idx]); 3412 3413 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) { 3414 device_printf(sc->sc_dev, 3415 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n", 3416 __func__, pkt->hdr.idx, ring->queued, ring->cur); 3417 /* XXX call iwm_force_nmi() */ 3418 } 3419 3420 KASSERT(ring->queued > 0, ("ring->queued is empty?")); 3421 ring->queued--; 3422 if (ring->queued == 0) 3423 iwm_pcie_clear_cmd_in_flight(sc); 3424 } 3425 3426 #if 0 3427 /* 3428 * necessary only for block ack mode 3429 */ 3430 void 3431 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id, 3432 uint16_t len) 3433 { 3434 struct iwm_agn_scd_bc_tbl *scd_bc_tbl; 3435 uint16_t w_val; 3436 3437 scd_bc_tbl = sc->sched_dma.vaddr; 3438 3439 len += 8; /* magic numbers came naturally from paris */ 3440 len = roundup(len, 4) / 4; 3441 3442 w_val = htole16(sta_id << 12 | len); 3443 3444 /* Update TX scheduler. */ 3445 scd_bc_tbl[qid].tfd_offset[idx] = w_val; 3446 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3447 BUS_DMASYNC_PREWRITE); 3448 3449 /* I really wonder what this is ?!? */ 3450 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) { 3451 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val; 3452 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3453 BUS_DMASYNC_PREWRITE); 3454 } 3455 } 3456 #endif 3457 3458 static int 3459 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate) 3460 { 3461 int i; 3462 3463 for (i = 0; i < nitems(iwm_rates); i++) { 3464 if (iwm_rates[i].rate == rate) 3465 return (i); 3466 } 3467 /* XXX error? */ 3468 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3469 "%s: couldn't find an entry for rate=%d\n", 3470 __func__, 3471 rate); 3472 return (0); 3473 } 3474 3475 /* 3476 * Fill in the rate related information for a transmit command. 
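 * Selection order (mirroring the branches below): management,
 * control and EAPOL frames use the fixed mgmtrate; multicast frames
 * use mcastrate; a user-pinned ucastrate wins next; everything else
 * is data and defers to the firmware rate-selection (RS) table via
 * IWM_TX_CMD_FLG_STA_RATE.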
3477 */ 3478 static const struct iwm_rate * 3479 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in, 3480 struct mbuf *m, struct iwm_tx_cmd *tx) 3481 { 3482 struct ieee80211_node *ni = &in->in_ni; 3483 struct ieee80211_frame *wh; 3484 const struct ieee80211_txparam *tp = ni->ni_txparms; 3485 const struct iwm_rate *rinfo; 3486 int type; 3487 int ridx, rate_flags; 3488 3489 wh = mtod(m, struct ieee80211_frame *); 3490 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3491 3492 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT; 3493 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY; 3494 3495 if (type == IEEE80211_FC0_TYPE_MGT || 3496 type == IEEE80211_FC0_TYPE_CTL || 3497 (m->m_flags & M_EAPOL) != 0) { 3498 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate); 3499 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3500 "%s: MGT (%d)\n", __func__, tp->mgmtrate); 3501 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3502 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate); 3503 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3504 "%s: MCAST (%d)\n", __func__, tp->mcastrate); 3505 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 3506 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate); 3507 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3508 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate); 3509 } else { 3510 /* for data frames, use RS table */ 3511 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__); 3512 ridx = iwm_rate2ridx(sc, ni->ni_txrate); 3513 if (ridx == -1) 3514 ridx = 0; 3515 3516 /* This is the index into the programmed table */ 3517 tx->initial_rate_index = 0; 3518 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE); 3519 } 3520 3521 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3522 "%s: frame type=%d txrate %d\n", 3523 __func__, type, iwm_rates[ridx].rate); 3524 3525 rinfo = &iwm_rates[ridx]; 3526 3527 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n", 3528 __func__, ridx, 3529 rinfo->rate, 3530 !! (IWM_RIDX_IS_CCK(ridx)) 3531 ); 3532 3533 /* XXX TODO: hard-coded TX antenna? 
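 * The line below always selects the first antenna; it should
 * presumably consult sc->nvm_data->valid_tx_ant instead.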
*/ 3534 rate_flags = 1 << IWM_RATE_MCS_ANT_POS; 3535 if (IWM_RIDX_IS_CCK(ridx)) 3536 rate_flags |= IWM_RATE_MCS_CCK_MSK; 3537 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp); 3538 3539 return rinfo; 3540 } 3541 3542 #define TB0_SIZE 16 3543 static int 3544 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 3545 { 3546 struct ieee80211com *ic = &sc->sc_ic; 3547 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3548 struct iwm_node *in = IWM_NODE(ni); 3549 struct iwm_tx_ring *ring; 3550 struct iwm_tx_data *data; 3551 struct iwm_tfd *desc; 3552 struct iwm_device_cmd *cmd; 3553 struct iwm_tx_cmd *tx; 3554 struct ieee80211_frame *wh; 3555 struct ieee80211_key *k = NULL; 3556 struct mbuf *m1; 3557 const struct iwm_rate *rinfo; 3558 uint32_t flags; 3559 u_int hdrlen; 3560 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER]; 3561 int nsegs; 3562 uint8_t tid, type; 3563 int i, totlen, error, pad; 3564 3565 wh = mtod(m, struct ieee80211_frame *); 3566 hdrlen = ieee80211_anyhdrsize(wh); 3567 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3568 tid = 0; 3569 ring = &sc->txq[ac]; 3570 desc = &ring->desc[ring->cur]; 3571 memset(desc, 0, sizeof(*desc)); 3572 data = &ring->data[ring->cur]; 3573 3574 /* Fill out iwm_tx_cmd to send to the firmware */ 3575 cmd = &ring->cmd[ring->cur]; 3576 cmd->hdr.code = IWM_TX_CMD; 3577 cmd->hdr.flags = 0; 3578 cmd->hdr.qid = ring->qid; 3579 cmd->hdr.idx = ring->cur; 3580 3581 tx = (void *)cmd->data; 3582 memset(tx, 0, sizeof(*tx)); 3583 3584 rinfo = iwm_tx_fill_cmd(sc, in, m, tx); 3585 3586 /* Encrypt the frame if need be. */ 3587 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 3588 /* Retrieve key for TX && do software encryption. */ 3589 k = ieee80211_crypto_encap(ni, m); 3590 if (k == NULL) { 3591 m_freem(m); 3592 return (ENOBUFS); 3593 } 3594 /* 802.11 header may have moved. */ 3595 wh = mtod(m, struct ieee80211_frame *); 3596 } 3597 3598 if (ieee80211_radiotap_active_vap(vap)) { 3599 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap; 3600 3601 tap->wt_flags = 0; 3602 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 3603 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 3604 tap->wt_rate = rinfo->rate; 3605 if (k != NULL) 3606 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3607 ieee80211_radiotap_tx(vap, m); 3608 } 3609 3610 3611 totlen = m->m_pkthdr.len; 3612 3613 flags = 0; 3614 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3615 flags |= IWM_TX_CMD_FLG_ACK; 3616 } 3617 3618 if (type == IEEE80211_FC0_TYPE_DATA 3619 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) 3620 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3621 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE; 3622 } 3623 3624 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3625 type != IEEE80211_FC0_TYPE_DATA) 3626 tx->sta_id = sc->sc_aux_sta.sta_id; 3627 else 3628 tx->sta_id = IWM_STATION_ID; 3629 3630 if (type == IEEE80211_FC0_TYPE_MGT) { 3631 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3632 3633 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3634 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { 3635 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC); 3636 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) { 3637 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3638 } else { 3639 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT); 3640 } 3641 } else { 3642 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3643 } 3644 3645 if (hdrlen & 3) { 3646 /* First segment length must be a multiple of 4. 
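 * Worked example: a 26-byte QoS data header has (hdrlen & 3) == 2,
 * so pad = 4 - 2 = 2, and IWM_TX_CMD_FLG_MH_PAD tells the firmware
 * that pad bytes were inserted after the MAC header.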
*/ 3647 flags |= IWM_TX_CMD_FLG_MH_PAD; 3648 pad = 4 - (hdrlen & 3); 3649 } else 3650 pad = 0; 3651 3652 tx->driver_txop = 0; 3653 tx->next_frame_len = 0; 3654 3655 tx->len = htole16(totlen); 3656 tx->tid_tspec = tid; 3657 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE); 3658 3659 /* Set physical address of "scratch area". */ 3660 tx->dram_lsb_ptr = htole32(data->scratch_paddr); 3661 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr); 3662 3663 /* Copy 802.11 header in TX command. */ 3664 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen); 3665 3666 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL; 3667 3668 tx->sec_ctl = 0; 3669 tx->tx_flags |= htole32(flags); 3670 3671 /* Trim 802.11 header. */ 3672 m_adj(m, hdrlen); 3673 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3674 segs, &nsegs, BUS_DMA_NOWAIT); 3675 if (error != 0) { 3676 if (error != EFBIG) { 3677 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3678 error); 3679 m_freem(m); 3680 return error; 3681 } 3682 /* Too many DMA segments, linearize mbuf. */ 3683 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2); 3684 if (m1 == NULL) { 3685 device_printf(sc->sc_dev, 3686 "%s: could not defrag mbuf\n", __func__); 3687 m_freem(m); 3688 return (ENOBUFS); 3689 } 3690 m = m1; 3691 3692 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3693 segs, &nsegs, BUS_DMA_NOWAIT); 3694 if (error != 0) { 3695 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3696 error); 3697 m_freem(m); 3698 return error; 3699 } 3700 } 3701 data->m = m; 3702 data->in = in; 3703 data->done = 0; 3704 3705 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3706 "sending txd %p, in %p\n", data, data->in); 3707 KASSERT(data->in != NULL, ("node is NULL")); 3708 3709 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3710 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n", 3711 ring->qid, ring->cur, totlen, nsegs, 3712 le32toh(tx->tx_flags), 3713 le32toh(tx->rate_n_flags), 3714 tx->initial_rate_index 3715 ); 3716 3717 /* Fill TX descriptor. */ 3718 desc->num_tbs = 2 + nsegs; 3719 3720 desc->tbs[0].lo = htole32(data->cmd_paddr); 3721 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | 3722 (TB0_SIZE << 4); 3723 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE); 3724 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | 3725 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) 3726 + hdrlen + pad - TB0_SIZE) << 4); 3727 3728 /* Other DMA segments are for data payload. */ 3729 for (i = 0; i < nsegs; i++) { 3730 seg = &segs[i]; 3731 desc->tbs[i+2].lo = htole32(seg->ds_addr); 3732 desc->tbs[i+2].hi_n_len = \ 3733 htole16(iwm_get_dma_hi_addr(seg->ds_addr)) 3734 | ((seg->ds_len) << 4); 3735 } 3736 3737 bus_dmamap_sync(ring->data_dmat, data->map, 3738 BUS_DMASYNC_PREWRITE); 3739 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 3740 BUS_DMASYNC_PREWRITE); 3741 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3742 BUS_DMASYNC_PREWRITE); 3743 3744 #if 0 3745 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len)); 3746 #endif 3747 3748 /* Kick TX ring. */ 3749 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT; 3750 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3751 3752 /* Mark TX ring as full if we reach a certain threshold. 
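 * The matching low-water check is in iwm_mvm_rx_tx_cmd(), which
 * clears the qfullmsk bit again once the ring drains below
 * IWM_TX_RING_LOMARK, so the two watermarks provide simple
 * hysteresis around the ring-full condition.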
 */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}

static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "->%s begin\n", __func__);

	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		m_freem(m);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "<-%s not RUNNING\n", __func__);
		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/* XXX: the bpf params are ignored for now; raw frames take the
	 * same default path as regular transmit. */
	error = iwm_tx(sc, m, ni, 0);
	if (sc->sc_tx_timer == 0)
		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
}

/*
 * mvm/tx.c
 */

/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
{
	int ret;
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}

/*
 * BEGIN mvm/quota.c
 */

static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (ivp) {
		id = ivp->phy_ctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = ivp->phy_ctxt->color;
		n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota.
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "%s: Failed to send quota: %d\n", __func__, ret);
	return ret;
}

/*
 * END mvm/quota.c
 */

/*
 * ieee80211 routines
 */

/*
 * Change to AUTH state in 80211 state machine. Roughly matches what
 * Linux does in bss_info_changed().
 */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX I have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);
	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
	    __func__, ether_sprintf(ni->ni_bssid));

	in->in_assoc = 0;
	iv->iv_auth = 1;

	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change; this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
	 */
	if (ni->ni_intval < 16) {
		device_printf(sc->sc_dev,
		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
		error = EINVAL;
		goto out;
	}

	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here. Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The OpenBSD port doesn't attempt to do that - it resets things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (i.e., we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
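	 * (sc_firmware_state tracks how far this setup got: 1 once the
	 * MAC context is added, 2 after the binding, 3 after the
	 * station; iwm_bring_down_firmware() unwinds it in reverse.)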
3967 */ 3968 if (iv->is_uploaded) { 3969 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 3970 device_printf(sc->sc_dev, 3971 "%s: failed to update MAC\n", __func__); 3972 goto out; 3973 } 3974 } else { 3975 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) { 3976 device_printf(sc->sc_dev, 3977 "%s: failed to add MAC\n", __func__); 3978 goto out; 3979 } 3980 } 3981 sc->sc_firmware_state = 1; 3982 3983 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 3984 in->in_ni.ni_chan, 1, 1)) != 0) { 3985 device_printf(sc->sc_dev, 3986 "%s: failed update phy ctxt\n", __func__); 3987 goto out; 3988 } 3989 iv->phy_ctxt = &sc->sc_phyctxt[0]; 3990 3991 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) { 3992 device_printf(sc->sc_dev, 3993 "%s: binding update cmd\n", __func__); 3994 goto out; 3995 } 3996 sc->sc_firmware_state = 2; 3997 /* 3998 * Authentication becomes unreliable when powersaving is left enabled 3999 * here. Powersaving will be activated again when association has 4000 * finished or is aborted. 4001 */ 4002 iv->ps_disabled = TRUE; 4003 error = iwm_mvm_power_update_mac(sc); 4004 iv->ps_disabled = FALSE; 4005 if (error != 0) { 4006 device_printf(sc->sc_dev, 4007 "%s: failed to update power management\n", 4008 __func__); 4009 goto out; 4010 } 4011 if ((error = iwm_mvm_add_sta(sc, in)) != 0) { 4012 device_printf(sc->sc_dev, 4013 "%s: failed to add sta\n", __func__); 4014 goto out; 4015 } 4016 sc->sc_firmware_state = 3; 4017 4018 /* 4019 * Prevent the FW from wandering off channel during association 4020 * by "protecting" the session with a time event. 4021 */ 4022 /* XXX duration is in units of TU, not MS */ 4023 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 4024 iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE); 4025 4026 error = 0; 4027 out: 4028 if (error != 0) 4029 iv->iv_auth = 0; 4030 ieee80211_free_node(ni); 4031 return (error); 4032 } 4033 4034 static struct ieee80211_node * 4035 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 4036 { 4037 return malloc(sizeof (struct iwm_node), M_80211_NODE, 4038 M_NOWAIT | M_ZERO); 4039 } 4040 4041 static uint8_t 4042 iwm_rate_from_ucode_rate(uint32_t rate_n_flags) 4043 { 4044 uint8_t plcp = rate_n_flags & 0xff; 4045 int i; 4046 4047 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4048 if (iwm_rates[i].plcp == plcp) 4049 return iwm_rates[i].rate; 4050 } 4051 return 0; 4052 } 4053 4054 uint8_t 4055 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx) 4056 { 4057 int i; 4058 uint8_t rval; 4059 4060 for (i = 0; i < rs->rs_nrates; i++) { 4061 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 4062 if (rval == iwm_rates[ridx].rate) 4063 return rs->rs_rates[i]; 4064 } 4065 4066 return 0; 4067 } 4068 4069 static int 4070 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate) 4071 { 4072 int i; 4073 4074 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4075 if (iwm_rates[i].rate == rate) 4076 return i; 4077 } 4078 4079 device_printf(sc->sc_dev, 4080 "%s: WARNING: device rate for %u not found!\n", 4081 __func__, rate); 4082 4083 return -1; 4084 } 4085 4086 4087 static void 4088 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix) 4089 { 4090 struct ieee80211_node *ni = &in->in_ni; 4091 struct iwm_lq_cmd *lq = &in->in_lq; 4092 struct ieee80211_rateset *rs = &ni->ni_rates; 4093 int nrates = rs->rs_nrates; 4094 int i, ridx, tab = 0; 4095 // int txant = 0; 4096 4097 KASSERT(rix >= 0 && rix < nrates, ("invalid rix")); 4098 4099 if (nrates > nitems(lq->rs_table)) { 4100 device_printf(sc->sc_dev, 
4101 "%s: node supports %d rates, driver handles " 4102 "only %zu\n", __func__, nrates, nitems(lq->rs_table)); 4103 return; 4104 } 4105 if (nrates == 0) { 4106 device_printf(sc->sc_dev, 4107 "%s: node supports 0 rates, odd!\n", __func__); 4108 return; 4109 } 4110 nrates = imin(rix + 1, nrates); 4111 4112 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4113 "%s: nrates=%d\n", __func__, nrates); 4114 4115 /* then construct a lq_cmd based on those */ 4116 memset(lq, 0, sizeof(*lq)); 4117 lq->sta_id = IWM_STATION_ID; 4118 4119 /* For HT, always enable RTS/CTS to avoid excessive retries. */ 4120 if (ni->ni_flags & IEEE80211_NODE_HT) 4121 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK; 4122 4123 /* 4124 * are these used? (we don't do SISO or MIMO) 4125 * need to set them to non-zero, though, or we get an error. 4126 */ 4127 lq->single_stream_ant_msk = 1; 4128 lq->dual_stream_ant_msk = 1; 4129 4130 /* 4131 * Build the actual rate selection table. 4132 * The lowest bits are the rates. Additionally, 4133 * CCK needs bit 9 to be set. The rest of the bits 4134 * we add to the table select the tx antenna 4135 * Note that we add the rates in the highest rate first 4136 * (opposite of ni_rates). 4137 */ 4138 for (i = 0; i < nrates; i++) { 4139 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL; 4140 int nextant; 4141 4142 /* Map 802.11 rate to HW rate index. */ 4143 ridx = iwm_rate2ridx(sc, rate); 4144 if (ridx == -1) 4145 continue; 4146 4147 #if 0 4148 if (txant == 0) 4149 txant = iwm_mvm_get_valid_tx_ant(sc); 4150 nextant = 1<<(ffs(txant)-1); 4151 txant &= ~nextant; 4152 #else 4153 nextant = iwm_mvm_get_valid_tx_ant(sc); 4154 #endif 4155 tab = iwm_rates[ridx].plcp; 4156 tab |= nextant << IWM_RATE_MCS_ANT_POS; 4157 if (IWM_RIDX_IS_CCK(ridx)) 4158 tab |= IWM_RATE_MCS_CCK_MSK; 4159 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4160 "station rate i=%d, rate=%d, hw=%x\n", 4161 i, iwm_rates[ridx].rate, tab); 4162 lq->rs_table[i] = htole32(tab); 4163 } 4164 /* then fill the rest with the lowest possible rate */ 4165 for (i = nrates; i < nitems(lq->rs_table); i++) { 4166 KASSERT(tab != 0, ("invalid tab")); 4167 lq->rs_table[i] = htole32(tab); 4168 } 4169 } 4170 4171 static int 4172 iwm_media_change(struct ifnet *ifp) 4173 { 4174 struct ieee80211vap *vap = ifp->if_softc; 4175 struct ieee80211com *ic = vap->iv_ic; 4176 struct iwm_softc *sc = ic->ic_softc; 4177 int error; 4178 4179 error = ieee80211_media_change(ifp); 4180 if (error != ENETRESET) 4181 return error; 4182 4183 IWM_LOCK(sc); 4184 if (ic->ic_nrunning > 0) { 4185 iwm_stop(sc); 4186 iwm_init(sc); 4187 } 4188 IWM_UNLOCK(sc); 4189 return error; 4190 } 4191 4192 static void 4193 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap) 4194 { 4195 struct iwm_vap *ivp = IWM_VAP(vap); 4196 int error; 4197 4198 /* Avoid Tx watchdog triggering, when transfers get dropped here. 
*/ 4199 sc->sc_tx_timer = 0; 4200 4201 ivp->iv_auth = 0; 4202 if (sc->sc_firmware_state == 3) { 4203 iwm_xmit_queue_drain(sc); 4204 // iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC); 4205 error = iwm_mvm_rm_sta(sc, vap, TRUE); 4206 if (error) { 4207 device_printf(sc->sc_dev, 4208 "%s: Failed to remove station: %d\n", 4209 __func__, error); 4210 } 4211 } 4212 if (sc->sc_firmware_state == 3) { 4213 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4214 if (error) { 4215 device_printf(sc->sc_dev, 4216 "%s: Failed to change mac context: %d\n", 4217 __func__, error); 4218 } 4219 } 4220 if (sc->sc_firmware_state == 3) { 4221 error = iwm_mvm_sf_update(sc, vap, FALSE); 4222 if (error) { 4223 device_printf(sc->sc_dev, 4224 "%s: Failed to update smart FIFO: %d\n", 4225 __func__, error); 4226 } 4227 } 4228 if (sc->sc_firmware_state == 3) { 4229 error = iwm_mvm_rm_sta_id(sc, vap); 4230 if (error) { 4231 device_printf(sc->sc_dev, 4232 "%s: Failed to remove station id: %d\n", 4233 __func__, error); 4234 } 4235 } 4236 if (sc->sc_firmware_state == 3) { 4237 error = iwm_mvm_update_quotas(sc, NULL); 4238 if (error) { 4239 device_printf(sc->sc_dev, 4240 "%s: Failed to update PHY quota: %d\n", 4241 __func__, error); 4242 } 4243 } 4244 if (sc->sc_firmware_state == 3) { 4245 /* XXX Might need to specify bssid correctly. */ 4246 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4247 if (error) { 4248 device_printf(sc->sc_dev, 4249 "%s: Failed to change mac context: %d\n", 4250 __func__, error); 4251 } 4252 } 4253 if (sc->sc_firmware_state == 3) { 4254 sc->sc_firmware_state = 2; 4255 } 4256 if (sc->sc_firmware_state > 1) { 4257 error = iwm_mvm_binding_remove_vif(sc, ivp); 4258 if (error) { 4259 device_printf(sc->sc_dev, 4260 "%s: Failed to remove channel ctx: %d\n", 4261 __func__, error); 4262 } 4263 } 4264 if (sc->sc_firmware_state > 1) { 4265 sc->sc_firmware_state = 1; 4266 } 4267 ivp->phy_ctxt = NULL; 4268 if (sc->sc_firmware_state > 0) { 4269 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4270 if (error) { 4271 device_printf(sc->sc_dev, 4272 "%s: Failed to change mac context: %d\n", 4273 __func__, error); 4274 } 4275 } 4276 if (sc->sc_firmware_state > 0) { 4277 error = iwm_mvm_power_update_mac(sc); 4278 if (error != 0) { 4279 device_printf(sc->sc_dev, 4280 "%s: failed to update power management\n", 4281 __func__); 4282 } 4283 } 4284 sc->sc_firmware_state = 0; 4285 } 4286 4287 static int 4288 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4289 { 4290 struct iwm_vap *ivp = IWM_VAP(vap); 4291 struct ieee80211com *ic = vap->iv_ic; 4292 struct iwm_softc *sc = ic->ic_softc; 4293 struct iwm_node *in; 4294 int error; 4295 4296 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4297 "switching state %s -> %s arg=0x%x\n", 4298 ieee80211_state_name[vap->iv_state], 4299 ieee80211_state_name[nstate], 4300 arg); 4301 4302 IEEE80211_UNLOCK(ic); 4303 IWM_LOCK(sc); 4304 4305 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) && 4306 (nstate == IEEE80211_S_AUTH || 4307 nstate == IEEE80211_S_ASSOC || 4308 nstate == IEEE80211_S_RUN)) { 4309 /* Stop blinking for a scan, when authenticating. 
*/ 4310 iwm_led_blink_stop(sc); 4311 } 4312 4313 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { 4314 iwm_mvm_led_disable(sc); 4315 /* disable beacon filtering if we're hopping out of RUN */ 4316 iwm_mvm_disable_beacon_filter(sc); 4317 if (((in = IWM_NODE(vap->iv_bss)) != NULL)) 4318 in->in_assoc = 0; 4319 } 4320 4321 if ((vap->iv_state == IEEE80211_S_AUTH || 4322 vap->iv_state == IEEE80211_S_ASSOC || 4323 vap->iv_state == IEEE80211_S_RUN) && 4324 (nstate == IEEE80211_S_INIT || 4325 nstate == IEEE80211_S_SCAN || 4326 nstate == IEEE80211_S_AUTH)) { 4327 iwm_mvm_stop_session_protection(sc, ivp); 4328 } 4329 4330 if ((vap->iv_state == IEEE80211_S_RUN || 4331 vap->iv_state == IEEE80211_S_ASSOC) && 4332 nstate == IEEE80211_S_INIT) { 4333 /* 4334 * In this case, iv_newstate() wants to send an 80211 frame on 4335 * the network that we are leaving. So we need to call it, 4336 * before tearing down all the firmware state. 4337 */ 4338 IWM_UNLOCK(sc); 4339 IEEE80211_LOCK(ic); 4340 ivp->iv_newstate(vap, nstate, arg); 4341 IEEE80211_UNLOCK(ic); 4342 IWM_LOCK(sc); 4343 iwm_bring_down_firmware(sc, vap); 4344 IWM_UNLOCK(sc); 4345 IEEE80211_LOCK(ic); 4346 return 0; 4347 } 4348 4349 switch (nstate) { 4350 case IEEE80211_S_INIT: 4351 case IEEE80211_S_SCAN: 4352 break; 4353 4354 case IEEE80211_S_AUTH: 4355 iwm_bring_down_firmware(sc, vap); 4356 if ((error = iwm_auth(vap, sc)) != 0) { 4357 device_printf(sc->sc_dev, 4358 "%s: could not move to auth state: %d\n", 4359 __func__, error); 4360 iwm_bring_down_firmware(sc, vap); 4361 IWM_UNLOCK(sc); 4362 IEEE80211_LOCK(ic); 4363 return 1; 4364 } 4365 break; 4366 4367 case IEEE80211_S_ASSOC: 4368 /* 4369 * EBS may be disabled due to previous failures reported by FW. 4370 * Reset EBS status here assuming environment has been changed. 
 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now that we have it all */
		/* (e.g. the associd comes in at this point). */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		iwm_mvm_sf_update(sc, vap, FALSE);
		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
		iwm_setrates(sc, in, rix);

		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
		}

		iwm_mvm_led_enable(sc);
		break;

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}

void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}

static int
iwm_send_bt_init_conf(struct iwm_softc *sc)
{
	struct iwm_bt_coex_cmd bt_cmd;

	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);

	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
	    &bt_cmd);
}

static boolean_t
iwm_mvm_is_lar_supported(struct iwm_softc *sc)
{
	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
	boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);

	if (iwm_lar_disable)
		return FALSE;

	/*
	 * Enable LAR only if it is supported by the FW (TLV) and
	 * enabled in the NVM.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		return nvm_lar && tlv_lar;
	else
		return tlv_lar;
}

static boolean_t
iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
{
	return fw_has_api(&sc->sc_fw.ucode_capa,
	    IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    fw_has_capa(&sc->sc_fw.ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
}

static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels = le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels = le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;	/* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}

static void
iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
{
	struct iwm_host_cmd cmd = {
		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
		.len = { sizeof(uint32_t), },
		.data = { &backoff, },
	};

	if (iwm_send_cmd(sc, &cmd) != 0) {
		device_printf(sc->sc_dev,
		    "failed to change thermal tx backoff\n");
	}
}

static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	sc->sf_state = IWM_SF_UNINIT;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * We should stop and restart the HW since that INIT
	 * image has just been loaded.
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* Restart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	error = iwm_mvm_sf_update(sc, NULL, FALSE);
	if (error)
		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel
used here isn't relevant as it's 4629 * going to be overwritten in the other flows. 4630 * For now use the first channel we have. 4631 */ 4632 if ((error = iwm_mvm_phy_ctxt_add(sc, 4633 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0) 4634 goto error; 4635 } 4636 4637 /* Initialize tx backoffs to the minimum. */ 4638 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 4639 iwm_mvm_tt_tx_backoff(sc, 0); 4640 4641 if (iwm_mvm_config_ltr(sc) != 0) 4642 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n"); 4643 4644 error = iwm_mvm_power_update_device(sc); 4645 if (error) 4646 goto error; 4647 4648 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0) 4649 goto error; 4650 4651 if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) { 4652 if ((error = iwm_mvm_config_umac_scan(sc)) != 0) 4653 goto error; 4654 } 4655 4656 /* Enable Tx queues. */ 4657 for (ac = 0; ac < WME_NUM_AC; ac++) { 4658 error = iwm_enable_txq(sc, IWM_STATION_ID, ac, 4659 iwm_mvm_ac_to_tx_fifo[ac]); 4660 if (error) 4661 goto error; 4662 } 4663 4664 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) { 4665 device_printf(sc->sc_dev, "failed to disable beacon filter\n"); 4666 goto error; 4667 } 4668 4669 return 0; 4670 4671 error: 4672 iwm_stop_device(sc); 4673 return error; 4674 } 4675 4676 /* Allow multicast from our BSSID. */ 4677 static int 4678 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc) 4679 { 4680 struct ieee80211_node *ni = vap->iv_bss; 4681 struct iwm_mcast_filter_cmd *cmd; 4682 size_t size; 4683 int error; 4684 4685 size = roundup(sizeof(*cmd), 4); 4686 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 4687 if (cmd == NULL) 4688 return ENOMEM; 4689 cmd->filter_own = 1; 4690 cmd->port_id = 0; 4691 cmd->count = 0; 4692 cmd->pass_all = 1; 4693 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid); 4694 4695 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 4696 IWM_CMD_SYNC, size, cmd); 4697 free(cmd, M_DEVBUF); 4698 4699 return (error); 4700 } 4701 4702 /* 4703 * ifnet interfaces 4704 */ 4705 4706 static void 4707 iwm_init(struct iwm_softc *sc) 4708 { 4709 int error; 4710 4711 if (sc->sc_flags & IWM_FLAG_HW_INITED) { 4712 return; 4713 } 4714 sc->sc_generation++; 4715 sc->sc_flags &= ~IWM_FLAG_STOPPED; 4716 4717 if ((error = iwm_init_hw(sc)) != 0) { 4718 printf("iwm_init_hw failed %d\n", error); 4719 iwm_stop(sc); 4720 return; 4721 } 4722 4723 /* 4724 * Ok, firmware loaded and we are jogging 4725 */ 4726 sc->sc_flags |= IWM_FLAG_HW_INITED; 4727 } 4728 4729 static int 4730 iwm_transmit(struct ieee80211com *ic, struct mbuf *m) 4731 { 4732 struct iwm_softc *sc; 4733 int error; 4734 4735 sc = ic->ic_softc; 4736 4737 IWM_LOCK(sc); 4738 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 4739 IWM_UNLOCK(sc); 4740 return (ENXIO); 4741 } 4742 error = mbufq_enqueue(&sc->sc_snd, m); 4743 if (error) { 4744 IWM_UNLOCK(sc); 4745 return (error); 4746 } 4747 iwm_start(sc); 4748 IWM_UNLOCK(sc); 4749 return (0); 4750 } 4751 4752 /* 4753 * Dequeue packets from sendq and call send. 
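 */

/*
 * The send path is a small producer/consumer pair: iwm_transmit()
 * above enqueues under IWM_LOCK and iwm_start() below drains the
 * queue while the hardware rings have room (qfullmsk == 0).  An
 * illustrative stand-alone sketch of the same pattern follows; this
 * is an editor's example with hypothetical names, not driver code.
 */
#if 0
static void
example_start(struct example_softc *xsc)
{
	struct mbuf *m;

	while (xsc->qfullmsk == 0 &&		/* rings have room */
	    (m = mbufq_dequeue(&xsc->snd)) != NULL)
		example_hw_tx(xsc, m);		/* consumer side */
}

static int
example_transmit(struct example_softc *xsc, struct mbuf *m)
{
	int error;

	EXAMPLE_LOCK(xsc);
	error = mbufq_enqueue(&xsc->snd, m);	/* producer side */
	if (error == 0)
		example_start(xsc);		/* kick the consumer */
	EXAMPLE_UNLOCK(xsc);
	return (error);
}
#endif

/* iwm_start() is the consumer half of that pattern.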
4754 */ 4755 static void 4756 iwm_start(struct iwm_softc *sc) 4757 { 4758 struct ieee80211_node *ni; 4759 struct mbuf *m; 4760 int ac = 0; 4761 4762 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__); 4763 while (sc->qfullmsk == 0 && 4764 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 4765 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4766 if (iwm_tx(sc, m, ni, ac) != 0) { 4767 if_inc_counter(ni->ni_vap->iv_ifp, 4768 IFCOUNTER_OERRORS, 1); 4769 ieee80211_free_node(ni); 4770 continue; 4771 } 4772 if (sc->sc_tx_timer == 0) { 4773 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, 4774 sc); 4775 } 4776 sc->sc_tx_timer = 15; 4777 } 4778 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__); 4779 } 4780 4781 static void 4782 iwm_stop(struct iwm_softc *sc) 4783 { 4784 4785 sc->sc_flags &= ~IWM_FLAG_HW_INITED; 4786 sc->sc_flags |= IWM_FLAG_STOPPED; 4787 sc->sc_generation++; 4788 iwm_led_blink_stop(sc); 4789 sc->sc_tx_timer = 0; 4790 iwm_stop_device(sc); 4791 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 4792 } 4793 4794 static void 4795 iwm_watchdog(void *arg) 4796 { 4797 struct iwm_softc *sc = arg; 4798 struct ieee80211com *ic = &sc->sc_ic; 4799 4800 if (sc->sc_attached == 0) 4801 return; 4802 4803 if (sc->sc_tx_timer > 0) { 4804 if (--sc->sc_tx_timer == 0) { 4805 device_printf(sc->sc_dev, "device timeout\n"); 4806 #ifdef IWM_DEBUG 4807 iwm_nic_error(sc); 4808 #endif 4809 ieee80211_restart_all(ic); 4810 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 4811 return; 4812 } 4813 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 4814 } 4815 } 4816 4817 static void 4818 iwm_parent(struct ieee80211com *ic) 4819 { 4820 struct iwm_softc *sc = ic->ic_softc; 4821 int startall = 0; 4822 4823 IWM_LOCK(sc); 4824 if (ic->ic_nrunning > 0) { 4825 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) { 4826 iwm_init(sc); 4827 startall = 1; 4828 } 4829 } else if (sc->sc_flags & IWM_FLAG_HW_INITED) 4830 iwm_stop(sc); 4831 IWM_UNLOCK(sc); 4832 if (startall) 4833 ieee80211_start_all(ic); 4834 } 4835 4836 /* 4837 * The interrupt side of things 4838 */ 4839 4840 /* 4841 * error dumping routines are from iwlwifi/mvm/utils.c 4842 */ 4843 4844 /* 4845 * Note: This structure is read from the device with IO accesses, 4846 * and the reading already does the endian conversion. As it is 4847 * read with uint32_t-sized accesses, any members with a different size 4848 * need to be ordered correctly though! 
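 * (iwm_read_mem() is called with sizeof(table)/sizeof(uint32_t), so
 * the log is fetched as a flat array of 32-bit words; that is why
 * every member below is a uint32_t.)
 */

/*
 * Editor's sketch of that access pattern, with a hypothetical helper
 * that is not part of the driver:
 */
#if 0
static void
example_read_words(const uint32_t *dev_mem, void *table, size_t tbl_size)
{
	uint32_t *dst = table;
	size_t w;

	for (w = 0; w < tbl_size / sizeof(uint32_t); w++)
		dst[w] = dev_mem[w];	/* one 32-bit access per word */
}
#endif

/* The LMAC error log layout: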
4849 */ 4850 struct iwm_error_event_table { 4851 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 4852 uint32_t error_id; /* type of error */ 4853 uint32_t trm_hw_status0; /* TRM HW status */ 4854 uint32_t trm_hw_status1; /* TRM HW status */ 4855 uint32_t blink2; /* branch link */ 4856 uint32_t ilink1; /* interrupt link */ 4857 uint32_t ilink2; /* interrupt link */ 4858 uint32_t data1; /* error-specific data */ 4859 uint32_t data2; /* error-specific data */ 4860 uint32_t data3; /* error-specific data */ 4861 uint32_t bcon_time; /* beacon timer */ 4862 uint32_t tsf_low; /* network timestamp function timer */ 4863 uint32_t tsf_hi; /* network timestamp function timer */ 4864 uint32_t gp1; /* GP1 timer register */ 4865 uint32_t gp2; /* GP2 timer register */ 4866 uint32_t fw_rev_type; /* firmware revision type */ 4867 uint32_t major; /* uCode version major */ 4868 uint32_t minor; /* uCode version minor */ 4869 uint32_t hw_ver; /* HW Silicon version */ 4870 uint32_t brd_ver; /* HW board version */ 4871 uint32_t log_pc; /* log program counter */ 4872 uint32_t frame_ptr; /* frame pointer */ 4873 uint32_t stack_ptr; /* stack pointer */ 4874 uint32_t hcmd; /* last host command header */ 4875 uint32_t isr0; /* isr status register LMPM_NIC_ISR0: 4876 * rxtx_flag */ 4877 uint32_t isr1; /* isr status register LMPM_NIC_ISR1: 4878 * host_flag */ 4879 uint32_t isr2; /* isr status register LMPM_NIC_ISR2: 4880 * enc_flag */ 4881 uint32_t isr3; /* isr status register LMPM_NIC_ISR3: 4882 * time_flag */ 4883 uint32_t isr4; /* isr status register LMPM_NIC_ISR4: 4884 * wico interrupt */ 4885 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */ 4886 uint32_t wait_event; /* wait event() caller address */ 4887 uint32_t l2p_control; /* L2pControlField */ 4888 uint32_t l2p_duration; /* L2pDurationField */ 4889 uint32_t l2p_mhvalid; /* L2pMhValidBits */ 4890 uint32_t l2p_addr_match; /* L2pAddrMatchStat */ 4891 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on 4892 * (LMPM_PMG_SEL) */ 4893 uint32_t u_timestamp; /* indicate when the date and time of the 4894 * compilation */ 4895 uint32_t flow_handler; /* FH read/write pointers, RX credit */ 4896 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; 4897 4898 /* 4899 * UMAC error struct - relevant starting from family 8000 chip. 4900 * Note: This structure is read from the device with IO accesses, 4901 * and the reading already does the endian conversion. As it is 4902 * read with u32-sized accesses, any members with a different size 4903 * need to be ordered correctly though! 
4904 */ 4905 struct iwm_umac_error_event_table { 4906 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 4907 uint32_t error_id; /* type of error */ 4908 uint32_t blink1; /* branch link */ 4909 uint32_t blink2; /* branch link */ 4910 uint32_t ilink1; /* interrupt link */ 4911 uint32_t ilink2; /* interrupt link */ 4912 uint32_t data1; /* error-specific data */ 4913 uint32_t data2; /* error-specific data */ 4914 uint32_t data3; /* error-specific data */ 4915 uint32_t umac_major; 4916 uint32_t umac_minor; 4917 uint32_t frame_pointer; /* core register 27*/ 4918 uint32_t stack_pointer; /* core register 28 */ 4919 uint32_t cmd_header; /* latest host cmd sent to UMAC */ 4920 uint32_t nic_isr_pref; /* ISR status register */ 4921 } __packed; 4922 4923 #define ERROR_START_OFFSET (1 * sizeof(uint32_t)) 4924 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t)) 4925 4926 #ifdef IWM_DEBUG 4927 struct { 4928 const char *name; 4929 uint8_t num; 4930 } advanced_lookup[] = { 4931 { "NMI_INTERRUPT_WDG", 0x34 }, 4932 { "SYSASSERT", 0x35 }, 4933 { "UCODE_VERSION_MISMATCH", 0x37 }, 4934 { "BAD_COMMAND", 0x38 }, 4935 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 4936 { "FATAL_ERROR", 0x3D }, 4937 { "NMI_TRM_HW_ERR", 0x46 }, 4938 { "NMI_INTERRUPT_TRM", 0x4C }, 4939 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 4940 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 4941 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 4942 { "NMI_INTERRUPT_HOST", 0x66 }, 4943 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 4944 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 4945 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 4946 { "ADVANCED_SYSASSERT", 0 }, 4947 }; 4948 4949 static const char * 4950 iwm_desc_lookup(uint32_t num) 4951 { 4952 int i; 4953 4954 for (i = 0; i < nitems(advanced_lookup) - 1; i++) 4955 if (advanced_lookup[i].num == num) 4956 return advanced_lookup[i].name; 4957 4958 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 4959 return advanced_lookup[i].name; 4960 } 4961 4962 static void 4963 iwm_nic_umac_error(struct iwm_softc *sc) 4964 { 4965 struct iwm_umac_error_event_table table; 4966 uint32_t base; 4967 4968 base = sc->umac_error_event_table; 4969 4970 if (base < 0x800000) { 4971 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n", 4972 base); 4973 return; 4974 } 4975 4976 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 4977 device_printf(sc->sc_dev, "reading errlog failed\n"); 4978 return; 4979 } 4980 4981 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 4982 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n"); 4983 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 4984 sc->sc_flags, table.valid); 4985 } 4986 4987 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id, 4988 iwm_desc_lookup(table.error_id)); 4989 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1); 4990 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2); 4991 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n", 4992 table.ilink1); 4993 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n", 4994 table.ilink2); 4995 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1); 4996 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2); 4997 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3); 4998 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major); 4999 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor); 5000 device_printf(sc->sc_dev, "0x%08X | frame pointer\n", 5001 table.frame_pointer); 5002 
device_printf(sc->sc_dev, "0x%08X | stack pointer\n", 5003 table.stack_pointer); 5004 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header); 5005 device_printf(sc->sc_dev, "0x%08X | isr status reg\n", 5006 table.nic_isr_pref); 5007 } 5008 5009 /* 5010 * Support for dumping the error log seemed like a good idea ... 5011 * but it's mostly hex junk and the only sensible thing is the 5012 * hw/ucode revision (which we know anyway). Since it's here, 5013 * I'll just leave it in, just in case e.g. the Intel guys want to 5014 * help us decipher some "ADVANCED_SYSASSERT" later. 5015 */ 5016 static void 5017 iwm_nic_error(struct iwm_softc *sc) 5018 { 5019 struct iwm_error_event_table table; 5020 uint32_t base; 5021 5022 device_printf(sc->sc_dev, "dumping device error log\n"); 5023 base = sc->error_event_table[0]; 5024 if (base < 0x800000) { 5025 device_printf(sc->sc_dev, 5026 "Invalid error log pointer 0x%08x\n", base); 5027 return; 5028 } 5029 5030 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5031 device_printf(sc->sc_dev, "reading errlog failed\n"); 5032 return; 5033 } 5034 5035 if (!table.valid) { 5036 device_printf(sc->sc_dev, "errlog not found, skipping\n"); 5037 return; 5038 } 5039 5040 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5041 device_printf(sc->sc_dev, "Start Error Log Dump:\n"); 5042 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5043 sc->sc_flags, table.valid); 5044 } 5045 5046 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id, 5047 iwm_desc_lookup(table.error_id)); 5048 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n", 5049 table.trm_hw_status0); 5050 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n", 5051 table.trm_hw_status1); 5052 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2); 5053 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1); 5054 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2); 5055 device_printf(sc->sc_dev, "%08X | data1\n", table.data1); 5056 device_printf(sc->sc_dev, "%08X | data2\n", table.data2); 5057 device_printf(sc->sc_dev, "%08X | data3\n", table.data3); 5058 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time); 5059 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low); 5060 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi); 5061 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1); 5062 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2); 5063 device_printf(sc->sc_dev, "%08X | uCode revision type\n", 5064 table.fw_rev_type); 5065 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major); 5066 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor); 5067 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver); 5068 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver); 5069 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd); 5070 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0); 5071 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1); 5072 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2); 5073 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3); 5074 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4); 5075 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id); 5076 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event); 5077 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control); 5078 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration); 5079 
device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid); 5080 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match); 5081 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 5082 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp); 5083 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler); 5084 5085 if (sc->umac_error_event_table) 5086 iwm_nic_umac_error(sc); 5087 } 5088 #endif 5089 5090 static void 5091 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m) 5092 { 5093 struct ieee80211com *ic = &sc->sc_ic; 5094 struct iwm_cmd_response *cresp; 5095 struct mbuf *m1; 5096 uint32_t offset = 0; 5097 uint32_t maxoff = IWM_RBUF_SIZE; 5098 uint32_t nextoff; 5099 boolean_t stolen = FALSE; 5100 5101 #define HAVEROOM(a) \ 5102 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff) 5103 5104 while (HAVEROOM(offset)) { 5105 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, 5106 offset); 5107 int qid, idx, code, len; 5108 5109 qid = pkt->hdr.qid; 5110 idx = pkt->hdr.idx; 5111 5112 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code); 5113 5114 /* 5115 * randomly get these from the firmware, no idea why. 5116 * they at least seem harmless, so just ignore them for now 5117 */ 5118 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) || 5119 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) { 5120 break; 5121 } 5122 5123 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5124 "rx packet qid=%d idx=%d type=%x\n", 5125 qid & ~0x80, pkt->hdr.idx, code); 5126 5127 len = iwm_rx_packet_len(pkt); 5128 len += sizeof(uint32_t); /* account for status word */ 5129 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN); 5130 5131 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt); 5132 5133 switch (code) { 5134 case IWM_REPLY_RX_PHY_CMD: 5135 iwm_mvm_rx_rx_phy_cmd(sc, pkt); 5136 break; 5137 5138 case IWM_REPLY_RX_MPDU_CMD: { 5139 /* 5140 * If this is the last frame in the RX buffer, we 5141 * can directly feed the mbuf to the sharks here. 5142 */ 5143 struct iwm_rx_packet *nextpkt = mtodoff(m, 5144 struct iwm_rx_packet *, nextoff); 5145 if (!HAVEROOM(nextoff) || 5146 (nextpkt->hdr.code == 0 && 5147 (nextpkt->hdr.qid & ~0x80) == 0 && 5148 nextpkt->hdr.idx == 0) || 5149 (nextpkt->len_n_flags == 5150 htole32(IWM_FH_RSCSR_FRAME_INVALID))) { 5151 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) { 5152 stolen = FALSE; 5153 /* Make sure we abort the loop */ 5154 nextoff = maxoff; 5155 } 5156 break; 5157 } 5158 5159 /* 5160 * Use m_copym instead of m_split, because that 5161 * makes it easier to keep a valid rx buffer in 5162 * the ring, when iwm_mvm_rx_rx_mpdu() fails. 5163 * 5164 * We need to start m_copym() at offset 0, to get the 5165 * M_PKTHDR flag preserved. 
5166 */ 5167 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT); 5168 if (m1) { 5169 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen)) 5170 stolen = TRUE; 5171 else 5172 m_freem(m1); 5173 } 5174 break; 5175 } 5176 5177 case IWM_TX_CMD: 5178 iwm_mvm_rx_tx_cmd(sc, pkt); 5179 break; 5180 5181 case IWM_MISSED_BEACONS_NOTIFICATION: { 5182 struct iwm_missed_beacons_notif *resp; 5183 int missed; 5184 5185 /* XXX look at mac_id to determine interface ID */ 5186 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5187 5188 resp = (void *)pkt->data; 5189 missed = le32toh(resp->consec_missed_beacons); 5190 5191 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE, 5192 "%s: MISSED_BEACON: mac_id=%d, " 5193 "consec_since_last_rx=%d, consec=%d, num_expect=%d " 5194 "num_rx=%d\n", 5195 __func__, 5196 le32toh(resp->mac_id), 5197 le32toh(resp->consec_missed_beacons_since_last_rx), 5198 le32toh(resp->consec_missed_beacons), 5199 le32toh(resp->num_expected_beacons), 5200 le32toh(resp->num_recvd_beacons)); 5201 5202 /* Be paranoid */ 5203 if (vap == NULL) 5204 break; 5205 5206 /* XXX no net80211 locking? */ 5207 if (vap->iv_state == IEEE80211_S_RUN && 5208 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 5209 if (missed > vap->iv_bmissthreshold) { 5210 /* XXX bad locking; turn into task */ 5211 IWM_UNLOCK(sc); 5212 ieee80211_beacon_miss(ic); 5213 IWM_LOCK(sc); 5214 } 5215 } 5216 5217 break; 5218 } 5219 5220 case IWM_MFUART_LOAD_NOTIFICATION: 5221 break; 5222 5223 case IWM_MVM_ALIVE: 5224 break; 5225 5226 case IWM_CALIB_RES_NOTIF_PHY_DB: 5227 break; 5228 5229 case IWM_STATISTICS_NOTIFICATION: 5230 iwm_mvm_handle_rx_statistics(sc, pkt); 5231 break; 5232 5233 case IWM_NVM_ACCESS_CMD: 5234 case IWM_MCC_UPDATE_CMD: 5235 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5236 memcpy(sc->sc_cmd_resp, 5237 pkt, sizeof(sc->sc_cmd_resp)); 5238 } 5239 break; 5240 5241 case IWM_MCC_CHUB_UPDATE_CMD: { 5242 struct iwm_mcc_chub_notif *notif; 5243 notif = (void *)pkt->data; 5244 5245 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8; 5246 sc->sc_fw_mcc[1] = notif->mcc & 0xff; 5247 sc->sc_fw_mcc[2] = '\0'; 5248 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 5249 "fw source %d sent CC '%s'\n", 5250 notif->source_id, sc->sc_fw_mcc); 5251 break; 5252 } 5253 5254 case IWM_DTS_MEASUREMENT_NOTIFICATION: 5255 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP, 5256 IWM_DTS_MEASUREMENT_NOTIF_WIDE): { 5257 struct iwm_dts_measurement_notif_v1 *notif; 5258 5259 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) { 5260 device_printf(sc->sc_dev, 5261 "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); 5262 break; 5263 } 5264 notif = (void *)pkt->data; 5265 IWM_DPRINTF(sc, IWM_DEBUG_TEMP, 5266 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n", 5267 notif->temp); 5268 break; 5269 } 5270 5271 case IWM_PHY_CONFIGURATION_CMD: 5272 case IWM_TX_ANT_CONFIGURATION_CMD: 5273 case IWM_ADD_STA: 5274 case IWM_MAC_CONTEXT_CMD: 5275 case IWM_REPLY_SF_CFG_CMD: 5276 case IWM_POWER_TABLE_CMD: 5277 case IWM_LTR_CONFIG: 5278 case IWM_PHY_CONTEXT_CMD: 5279 case IWM_BINDING_CONTEXT_CMD: 5280 case IWM_TIME_EVENT_CMD: 5281 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD): 5282 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC): 5283 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC): 5284 case IWM_SCAN_OFFLOAD_REQUEST_CMD: 5285 case IWM_SCAN_OFFLOAD_ABORT_CMD: 5286 case IWM_REPLY_BEACON_FILTERING_CMD: 5287 case IWM_MAC_PM_POWER_TABLE: 5288 case IWM_TIME_QUOTA_CMD: 5289 case IWM_REMOVE_STA: 5290 case IWM_TXPATH_FLUSH: 5291 case IWM_LQ_CMD: 5292 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, 5293 
IWM_FW_PAGING_BLOCK_CMD): 5294 case IWM_BT_CONFIG: 5295 case IWM_REPLY_THERMAL_MNG_BACKOFF: 5296 cresp = (void *)pkt->data; 5297 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5298 memcpy(sc->sc_cmd_resp, 5299 pkt, sizeof(*pkt)+sizeof(*cresp)); 5300 } 5301 break; 5302 5303 /* ignore */ 5304 case IWM_PHY_DB_CMD: 5305 break; 5306 5307 case IWM_INIT_COMPLETE_NOTIF: 5308 break; 5309 5310 case IWM_SCAN_OFFLOAD_COMPLETE: 5311 iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt); 5312 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5313 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5314 ieee80211_runtask(ic, &sc->sc_es_task); 5315 } 5316 break; 5317 5318 case IWM_SCAN_ITERATION_COMPLETE: { 5319 struct iwm_lmac_scan_complete_notif *notif; 5320 notif = (void *)pkt->data; 5321 break; 5322 } 5323 5324 case IWM_SCAN_COMPLETE_UMAC: 5325 iwm_mvm_rx_umac_scan_complete_notif(sc, pkt); 5326 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5327 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5328 ieee80211_runtask(ic, &sc->sc_es_task); 5329 } 5330 break; 5331 5332 case IWM_SCAN_ITERATION_COMPLETE_UMAC: { 5333 struct iwm_umac_scan_iter_complete_notif *notif; 5334 notif = (void *)pkt->data; 5335 5336 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration " 5337 "complete, status=0x%x, %d channels scanned\n", 5338 notif->status, notif->scanned_channels); 5339 break; 5340 } 5341 5342 case IWM_REPLY_ERROR: { 5343 struct iwm_error_resp *resp; 5344 resp = (void *)pkt->data; 5345 5346 device_printf(sc->sc_dev, 5347 "firmware error 0x%x, cmd 0x%x\n", 5348 le32toh(resp->error_type), 5349 resp->cmd_id); 5350 break; 5351 } 5352 5353 case IWM_TIME_EVENT_NOTIFICATION: 5354 iwm_mvm_rx_time_event_notif(sc, pkt); 5355 break; 5356 5357 /* 5358 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG 5359 * messages. Just ignore them for now. 5360 */ 5361 case IWM_DEBUG_LOG_MSG: 5362 break; 5363 5364 case IWM_MCAST_FILTER_CMD: 5365 break; 5366 5367 case IWM_SCD_QUEUE_CFG: { 5368 struct iwm_scd_txq_cfg_rsp *rsp; 5369 rsp = (void *)pkt->data; 5370 5371 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 5372 "queue cfg token=0x%x sta_id=%d " 5373 "tid=%d scd_queue=%d\n", 5374 rsp->token, rsp->sta_id, rsp->tid, 5375 rsp->scd_queue); 5376 break; 5377 } 5378 5379 default: 5380 device_printf(sc->sc_dev, 5381 "frame %d/%d %x UNHANDLED (this should " 5382 "not happen)\n", qid & ~0x80, idx, 5383 pkt->len_n_flags); 5384 break; 5385 } 5386 5387 /* 5388 * Why test bit 0x80? The Linux driver: 5389 * 5390 * There is one exception: uCode sets bit 15 when it 5391 * originates the response/notification, i.e. when the 5392 * response/notification is not a direct response to a 5393 * command sent by the driver. For example, uCode issues 5394 * IWM_REPLY_RX when it sends a received frame to the driver; 5395 * it is not a direct response to any driver command. 5396 * 5397 * Ok, so since when is 7 == 15? Well, the Linux driver 5398 * uses a slightly different format for pkt->hdr, and "qid" 5399 * is actually the upper byte of a two-byte field. 5400 */ 5401 if (!(qid & (1 << 7))) 5402 iwm_cmd_done(sc, pkt); 5403 5404 offset = nextoff; 5405 } 5406 if (stolen) 5407 m_freem(m); 5408 #undef HAVEROOM 5409 } 5410 5411 /* 5412 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt. 
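 * The RX status page reports how far the firmware has advanced
 * (closed_rb_num); we walk rxq.cur up to that index and hand each
 * buffer to iwm_handle_rxb().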
5413 * Basic structure from if_iwn 5414 */ 5415 static void 5416 iwm_notif_intr(struct iwm_softc *sc) 5417 { 5418 uint16_t hw; 5419 5420 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 5421 BUS_DMASYNC_POSTREAD); 5422 5423 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff; 5424 5425 /* 5426 * Process responses 5427 */ 5428 while (sc->rxq.cur != hw) { 5429 struct iwm_rx_ring *ring = &sc->rxq; 5430 struct iwm_rx_data *data = &ring->data[ring->cur]; 5431 5432 bus_dmamap_sync(ring->data_dmat, data->map, 5433 BUS_DMASYNC_POSTREAD); 5434 5435 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5436 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur); 5437 iwm_handle_rxb(sc, data->m); 5438 5439 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT; 5440 } 5441 5442 /* 5443 * Tell the firmware that it can reuse the ring entries that 5444 * we have just processed. 5445 * Seems like the hardware gets upset unless we align 5446 * the write by 8?? 5447 */ 5448 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1; 5449 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8)); 5450 } 5451 5452 static void 5453 iwm_intr(void *arg) 5454 { 5455 struct iwm_softc *sc = arg; 5456 int handled = 0; 5457 int r1, r2, rv = 0; 5458 int isperiodic = 0; 5459 5460 IWM_LOCK(sc); 5461 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0); 5462 5463 if (sc->sc_flags & IWM_FLAG_USE_ICT) { 5464 uint32_t *ict = sc->ict_dma.vaddr; 5465 int tmp; 5466 5467 tmp = htole32(ict[sc->ict_cur]); 5468 if (!tmp) 5469 goto out_ena; 5470 5471 /* 5472 * ok, there was something. keep plowing until we have all. 5473 */ 5474 r1 = r2 = 0; 5475 while (tmp) { 5476 r1 |= tmp; 5477 ict[sc->ict_cur] = 0; 5478 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT; 5479 tmp = htole32(ict[sc->ict_cur]); 5480 } 5481 5482 /* this is where the fun begins. don't ask */ 5483 if (r1 == 0xffffffff) 5484 r1 = 0; 5485 5486 /* i am not expected to understand this */ 5487 if (r1 & 0xc0000) 5488 r1 |= 0x8000; 5489 r1 = (0xff & r1) | ((0xff00 & r1) << 16); 5490 } else { 5491 r1 = IWM_READ(sc, IWM_CSR_INT); 5492 /* "hardware gone" (where, fishing?) */ 5493 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 5494 goto out; 5495 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS); 5496 } 5497 if (r1 == 0 && r2 == 0) { 5498 goto out_ena; 5499 } 5500 5501 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask); 5502 5503 /* Safely ignore these bits for debug checks below */ 5504 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD); 5505 5506 if (r1 & IWM_CSR_INT_BIT_SW_ERR) { 5507 int i; 5508 struct ieee80211com *ic = &sc->sc_ic; 5509 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5510 5511 #ifdef IWM_DEBUG 5512 iwm_nic_error(sc); 5513 #endif 5514 /* Dump driver status (TX and RX rings) while we're here. */ 5515 device_printf(sc->sc_dev, "driver status:\n"); 5516 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) { 5517 struct iwm_tx_ring *ring = &sc->txq[i]; 5518 device_printf(sc->sc_dev, 5519 " tx ring %2d: qid=%-2d cur=%-3d " 5520 "queued=%-3d\n", 5521 i, ring->qid, ring->cur, ring->queued); 5522 } 5523 device_printf(sc->sc_dev, 5524 " rx ring: cur=%d\n", sc->rxq.cur); 5525 device_printf(sc->sc_dev, 5526 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state); 5527 5528 /* Reset our firmware state tracking. 
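		 * A later iwm_auth() then rebuilds the MAC, binding and
		 * station state in the firmware from scratch.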
*/ 5529 sc->sc_firmware_state = 0; 5530 /* Don't stop the device; just do a VAP restart */ 5531 IWM_UNLOCK(sc); 5532 5533 if (vap == NULL) { 5534 printf("%s: null vap\n", __func__); 5535 return; 5536 } 5537 5538 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; " 5539 "restarting\n", __func__, vap->iv_state); 5540 5541 ieee80211_restart_all(ic); 5542 return; 5543 } 5544 5545 if (r1 & IWM_CSR_INT_BIT_HW_ERR) { 5546 handled |= IWM_CSR_INT_BIT_HW_ERR; 5547 device_printf(sc->sc_dev, "hardware error, stopping device\n"); 5548 iwm_stop(sc); 5549 rv = 1; 5550 goto out; 5551 } 5552 5553 /* firmware chunk loaded */ 5554 if (r1 & IWM_CSR_INT_BIT_FH_TX) { 5555 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK); 5556 handled |= IWM_CSR_INT_BIT_FH_TX; 5557 sc->sc_fw_chunk_done = 1; 5558 wakeup(&sc->sc_fw); 5559 } 5560 5561 if (r1 & IWM_CSR_INT_BIT_RF_KILL) { 5562 handled |= IWM_CSR_INT_BIT_RF_KILL; 5563 if (iwm_check_rfkill(sc)) { 5564 device_printf(sc->sc_dev, 5565 "%s: rfkill switch, disabling interface\n", 5566 __func__); 5567 iwm_stop(sc); 5568 } 5569 } 5570 5571 /* 5572 * The Linux driver uses periodic interrupts to avoid races. 5573 * We cargo-cult like it's going out of fashion. 5574 */ 5575 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) { 5576 handled |= IWM_CSR_INT_BIT_RX_PERIODIC; 5577 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC); 5578 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0) 5579 IWM_WRITE_1(sc, 5580 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS); 5581 isperiodic = 1; 5582 } 5583 5584 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) { 5585 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX); 5586 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK); 5587 5588 iwm_notif_intr(sc); 5589 5590 /* enable periodic interrupt, see above */ 5591 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic) 5592 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, 5593 IWM_CSR_INT_PERIODIC_ENA); 5594 } 5595 5596 if (__predict_false(r1 & ~handled)) 5597 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5598 "%s: unhandled interrupts: %x\n", __func__, r1); 5599 rv = 1; 5600 5601 out_ena: 5602 iwm_restore_interrupts(sc); 5603 out: 5604 IWM_UNLOCK(sc); 5605 return; 5606 } 5607 5608 /* 5609 * Autoconf glue-sniffing 5610 */ 5611 #define PCI_VENDOR_INTEL 0x8086 5612 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3 5613 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4 5614 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165 5615 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166 5616 #define PCI_PRODUCT_INTEL_WL_3168_1 0x24fb 5617 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1 5618 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2 5619 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a 5620 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b 5621 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3 5622 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4 5623 #define PCI_PRODUCT_INTEL_WL_8265_1 0x24fd 5624 5625 static const struct iwm_devices { 5626 uint16_t device; 5627 const struct iwm_cfg *cfg; 5628 } iwm_devices[] = { 5629 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg }, 5630 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg }, 5631 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg }, 5632 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg }, 5633 { PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg }, 5634 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg }, 5635 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg }, 5636 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg }, 5637 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg }, 5638 { 
PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg }, 5639 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg }, 5640 { PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg }, 5641 }; 5642 5643 static int 5644 iwm_probe(device_t dev) 5645 { 5646 int i; 5647 5648 for (i = 0; i < nitems(iwm_devices); i++) { 5649 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL && 5650 pci_get_device(dev) == iwm_devices[i].device) { 5651 device_set_desc(dev, iwm_devices[i].cfg->name); 5652 return (BUS_PROBE_DEFAULT); 5653 } 5654 } 5655 5656 return (ENXIO); 5657 } 5658 5659 static int 5660 iwm_dev_check(device_t dev) 5661 { 5662 struct iwm_softc *sc; 5663 uint16_t devid; 5664 int i; 5665 5666 sc = device_get_softc(dev); 5667 5668 devid = pci_get_device(dev); 5669 for (i = 0; i < nitems(iwm_devices); i++) { 5670 if (iwm_devices[i].device == devid) { 5671 sc->cfg = iwm_devices[i].cfg; 5672 return (0); 5673 } 5674 } 5675 device_printf(dev, "unknown adapter type\n"); 5676 return ENXIO; 5677 } 5678 5679 /* PCI registers */ 5680 #define PCI_CFG_RETRY_TIMEOUT 0x041 5681 5682 static int 5683 iwm_pci_attach(device_t dev) 5684 { 5685 struct iwm_softc *sc; 5686 int count, error, rid; 5687 uint16_t reg; 5688 5689 sc = device_get_softc(dev); 5690 5691 /* We disable the RETRY_TIMEOUT register (0x41) to keep 5692 * PCI Tx retries from interfering with C3 CPU state */ 5693 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1); 5694 5695 /* Enable bus-mastering and hardware bug workaround. */ 5696 pci_enable_busmaster(dev); 5697 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg)); 5698 /* if !MSI */ 5699 if (reg & PCIM_STATUS_INTxSTATE) { 5700 reg &= ~PCIM_STATUS_INTxSTATE; 5701 } 5702 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg)); 5703 5704 rid = PCIR_BAR(0); 5705 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 5706 RF_ACTIVE); 5707 if (sc->sc_mem == NULL) { 5708 device_printf(sc->sc_dev, "can't map mem space\n"); 5709 return (ENXIO); 5710 } 5711 sc->sc_st = rman_get_bustag(sc->sc_mem); 5712 sc->sc_sh = rman_get_bushandle(sc->sc_mem); 5713 5714 /* Install interrupt handler. */ 5715 count = 1; 5716 rid = 0; 5717 if (pci_alloc_msi(dev, &count) == 0) 5718 rid = 1; 5719 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 5720 (rid != 0 ? 

static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
}

static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	/* Match device id */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible, we store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			device_printf(dev, "could not initialize hardware\n");
			goto fail;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, which has the same PCI IDs as 7265. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
	    IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}
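
	/*
	 * The allocations below are all undone by iwm_detach_local() on
	 * the fail: path (sc_attached is already set), so the individual
	 * error legs only need to report the failure and jump.
	 */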

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	error = iwm_read_firmware(sc);
	if (error) {
		goto fail;
	} else if (sc->sc_fw.fw_fp == NULL) {
		/*
		 * XXX Add a solution for properly deferring firmware load
		 *     during bootup.
		 */
		goto fail;
	} else {
		sc->sc_preinit_hook.ich_func = iwm_preinit;
		sc->sc_preinit_hook.ich_arg = sc;
		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
			device_printf(dev,
			    "config_intrhook_establish failed\n");
			goto fail;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return (0);

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return (ENXIO);
}

static int
iwm_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
		return (FALSE);

	return (TRUE);
}
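
/*
 * WME parameters are snapshotted from net80211 under the IC lock and
 * only then copied into the vap's queue_params under the driver lock,
 * which avoids holding both locks at once.  The firmware is told about
 * the change only once the MAC context has been uploaded and we are
 * associated; until then the parameters are merely cached.
 */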

static int
iwm_wme_update(struct ieee80211com *ic)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwm_softc *sc = ic->ic_softc;
	struct chanAccParams chp;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct iwm_node *in;
	struct wmeParams tmp[WME_NUM_AC];
	int aci, error;

	if (vap == NULL)
		return (0);

	ieee80211_wme_ic_getparams(ic, &chp);

	IEEE80211_LOCK(ic);
	for (aci = 0; aci < WME_NUM_AC; aci++)
		tmp[aci] = chp.cap_wmeParams[aci];
	IEEE80211_UNLOCK(ic);

	IWM_LOCK(sc);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac = &tmp[aci];
		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
		ivp->queue_params[aci].edca_txop =
		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
	}
	ivp->have_wme = TRUE;
	if (ivp->is_uploaded && vap->iv_bss != NULL) {
		in = IWM_NODE(vap->iv_bss);
		if (in->in_assoc) {
			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update MAC\n", __func__);
			}
		}
	}
	IWM_UNLOCK(sc);

	return (0);
#undef IWM_EXP2
}
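
/*
 * iwm_preinit() runs from the config_intrhook established at the end of
 * iwm_attach(), i.e. once interrupts are available.  It brings the
 * hardware up far enough to run the init firmware and read the NVM
 * (radio capabilities, MAC address), then completes the net80211 attach.
 */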

static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}

/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s end\n", __func__);
}
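
/*
 * net80211 vap glue.  Only a single vap is supported; the original
 * iv_newstate handler is saved so that iwm_newstate() can chain to it
 * after routing state transitions through the firmware.
 */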

static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return (NULL);
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ivp->id = IWM_DEFAULT_MACID;
	ivp->color = IWM_DEFAULT_COLOR;

	ivp->have_wme = FALSE;
	ivp->ps_disabled = FALSE;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return (vap);
}

static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}

static void
iwm_xmit_queue_drain(struct iwm_softc *sc)
{
	struct mbuf *m;
	struct ieee80211_node *ni;

	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		ieee80211_free_node(ni);
		m_freem(m);
	}
}

static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}

static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Clearing IWM_FLAG_SCAN_RUNNING now is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}

static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}

void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
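
/*
 * Suspend/resume glue.  iwm_suspend() reuses IWM_FLAG_SCANNING as a
 * "was running, needs reinit" marker: iwm_resume() clears it again and
 * uses it to decide whether ieee80211_resume_all() must be called.
 */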

static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	if (!sc->sc_attached)
		return (0);

	iwm_init_task(sc);

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return (0);
}

static int
iwm_suspend(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_stop;

	do_stop = (sc->sc_ic.ic_nrunning > 0);

	if (!sc->sc_attached)
		return (0);

	ieee80211_suspend_all(&sc->sc_ic);

	if (do_stop) {
		IWM_LOCK(sc);
		iwm_stop(sc);
		sc->sc_flags |= IWM_FLAG_SCANNING;
		IWM_UNLOCK(sc);
	}

	return (0);
}

static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return (0);
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}

static int
iwm_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	return (iwm_detach_local(sc, 1));
}

static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
    iwm_devices, nitems(iwm_devices));
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);